Showing preview only (4,734K chars total). Download the full file or copy to clipboard to get everything.
Repository: bytedance/deer-flow
Branch: main
Commit: fe75cb35caa4
Files: 603
Total size: 4.4 MB
Directory structure:
gitextract_d9b2_eay/
├── .dockerignore
├── .gitattributes
├── .github/
│ ├── ISSUE_TEMPLATE/
│ │ └── runtime-information.yml
│ ├── copilot-instructions.md
│ └── workflows/
│ └── backend-unit-tests.yml
├── .gitignore
├── CONTRIBUTING.md
├── LICENSE
├── Makefile
├── README.md
├── README_ja.md
├── README_zh.md
├── SECURITY.md
├── backend/
│ ├── .gitignore
│ ├── .python-version
│ ├── AGENTS.md
│ ├── CLAUDE.md
│ ├── CONTRIBUTING.md
│ ├── Dockerfile
│ ├── Makefile
│ ├── README.md
│ ├── app/
│ │ ├── __init__.py
│ │ ├── channels/
│ │ │ ├── __init__.py
│ │ │ ├── base.py
│ │ │ ├── feishu.py
│ │ │ ├── manager.py
│ │ │ ├── message_bus.py
│ │ │ ├── service.py
│ │ │ ├── slack.py
│ │ │ ├── store.py
│ │ │ └── telegram.py
│ │ └── gateway/
│ │ ├── __init__.py
│ │ ├── app.py
│ │ ├── config.py
│ │ ├── path_utils.py
│ │ └── routers/
│ │ ├── __init__.py
│ │ ├── agents.py
│ │ ├── artifacts.py
│ │ ├── channels.py
│ │ ├── mcp.py
│ │ ├── memory.py
│ │ ├── models.py
│ │ ├── skills.py
│ │ ├── suggestions.py
│ │ └── uploads.py
│ ├── debug.py
│ ├── docs/
│ │ ├── API.md
│ │ ├── APPLE_CONTAINER.md
│ │ ├── ARCHITECTURE.md
│ │ ├── AUTO_TITLE_GENERATION.md
│ │ ├── CONFIGURATION.md
│ │ ├── FILE_UPLOAD.md
│ │ ├── HARNESS_APP_SPLIT.md
│ │ ├── MCP_SERVER.md
│ │ ├── MEMORY_IMPROVEMENTS.md
│ │ ├── MEMORY_IMPROVEMENTS_SUMMARY.md
│ │ ├── PATH_EXAMPLES.md
│ │ ├── README.md
│ │ ├── SETUP.md
│ │ ├── TITLE_GENERATION_IMPLEMENTATION.md
│ │ ├── TODO.md
│ │ ├── plan_mode_usage.md
│ │ ├── summarization.md
│ │ └── task_tool_improvements.md
│ ├── langgraph.json
│ ├── packages/
│ │ └── harness/
│ │ ├── deerflow/
│ │ │ ├── __init__.py
│ │ │ ├── agents/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── checkpointer/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── async_provider.py
│ │ │ │ │ └── provider.py
│ │ │ │ ├── lead_agent/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── agent.py
│ │ │ │ │ └── prompt.py
│ │ │ │ ├── memory/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── prompt.py
│ │ │ │ │ ├── queue.py
│ │ │ │ │ └── updater.py
│ │ │ │ ├── middlewares/
│ │ │ │ │ ├── clarification_middleware.py
│ │ │ │ │ ├── dangling_tool_call_middleware.py
│ │ │ │ │ ├── deferred_tool_filter_middleware.py
│ │ │ │ │ ├── loop_detection_middleware.py
│ │ │ │ │ ├── memory_middleware.py
│ │ │ │ │ ├── subagent_limit_middleware.py
│ │ │ │ │ ├── thread_data_middleware.py
│ │ │ │ │ ├── title_middleware.py
│ │ │ │ │ ├── todo_middleware.py
│ │ │ │ │ ├── tool_error_handling_middleware.py
│ │ │ │ │ ├── uploads_middleware.py
│ │ │ │ │ └── view_image_middleware.py
│ │ │ │ └── thread_state.py
│ │ │ ├── client.py
│ │ │ ├── community/
│ │ │ │ ├── aio_sandbox/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── aio_sandbox.py
│ │ │ │ │ ├── aio_sandbox_provider.py
│ │ │ │ │ ├── backend.py
│ │ │ │ │ ├── local_backend.py
│ │ │ │ │ ├── remote_backend.py
│ │ │ │ │ └── sandbox_info.py
│ │ │ │ ├── firecrawl/
│ │ │ │ │ └── tools.py
│ │ │ │ ├── image_search/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── tools.py
│ │ │ │ ├── infoquest/
│ │ │ │ │ ├── infoquest_client.py
│ │ │ │ │ └── tools.py
│ │ │ │ ├── jina_ai/
│ │ │ │ │ ├── jina_client.py
│ │ │ │ │ └── tools.py
│ │ │ │ └── tavily/
│ │ │ │ └── tools.py
│ │ │ ├── config/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── agents_config.py
│ │ │ │ ├── app_config.py
│ │ │ │ ├── checkpointer_config.py
│ │ │ │ ├── extensions_config.py
│ │ │ │ ├── memory_config.py
│ │ │ │ ├── model_config.py
│ │ │ │ ├── paths.py
│ │ │ │ ├── sandbox_config.py
│ │ │ │ ├── skills_config.py
│ │ │ │ ├── subagents_config.py
│ │ │ │ ├── summarization_config.py
│ │ │ │ ├── title_config.py
│ │ │ │ ├── tool_config.py
│ │ │ │ ├── tool_search_config.py
│ │ │ │ └── tracing_config.py
│ │ │ ├── mcp/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── cache.py
│ │ │ │ ├── client.py
│ │ │ │ ├── oauth.py
│ │ │ │ └── tools.py
│ │ │ ├── models/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── claude_provider.py
│ │ │ │ ├── credential_loader.py
│ │ │ │ ├── factory.py
│ │ │ │ ├── openai_codex_provider.py
│ │ │ │ ├── patched_deepseek.py
│ │ │ │ └── patched_minimax.py
│ │ │ ├── reflection/
│ │ │ │ ├── __init__.py
│ │ │ │ └── resolvers.py
│ │ │ ├── sandbox/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── exceptions.py
│ │ │ │ ├── local/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── list_dir.py
│ │ │ │ │ ├── local_sandbox.py
│ │ │ │ │ └── local_sandbox_provider.py
│ │ │ │ ├── middleware.py
│ │ │ │ ├── sandbox.py
│ │ │ │ ├── sandbox_provider.py
│ │ │ │ └── tools.py
│ │ │ ├── skills/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── loader.py
│ │ │ │ ├── parser.py
│ │ │ │ ├── types.py
│ │ │ │ └── validation.py
│ │ │ ├── subagents/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── builtins/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── bash_agent.py
│ │ │ │ │ └── general_purpose.py
│ │ │ │ ├── config.py
│ │ │ │ ├── executor.py
│ │ │ │ └── registry.py
│ │ │ ├── tools/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── builtins/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── clarification_tool.py
│ │ │ │ │ ├── present_file_tool.py
│ │ │ │ │ ├── setup_agent_tool.py
│ │ │ │ │ ├── task_tool.py
│ │ │ │ │ ├── tool_search.py
│ │ │ │ │ └── view_image_tool.py
│ │ │ │ └── tools.py
│ │ │ └── utils/
│ │ │ ├── file_conversion.py
│ │ │ ├── network.py
│ │ │ └── readability.py
│ │ └── pyproject.toml
│ ├── pyproject.toml
│ ├── ruff.toml
│ └── tests/
│ ├── conftest.py
│ ├── test_app_config_reload.py
│ ├── test_artifacts_router.py
│ ├── test_channel_file_attachments.py
│ ├── test_channels.py
│ ├── test_checkpointer.py
│ ├── test_checkpointer_none_fix.py
│ ├── test_cli_auth_providers.py
│ ├── test_client.py
│ ├── test_client_live.py
│ ├── test_config_version.py
│ ├── test_credential_loader.py
│ ├── test_custom_agent.py
│ ├── test_docker_sandbox_mode_detection.py
│ ├── test_feishu_parser.py
│ ├── test_harness_boundary.py
│ ├── test_infoquest_client.py
│ ├── test_lead_agent_model_resolution.py
│ ├── test_local_sandbox_encoding.py
│ ├── test_loop_detection_middleware.py
│ ├── test_mcp_client_config.py
│ ├── test_mcp_oauth.py
│ ├── test_memory_prompt_injection.py
│ ├── test_memory_updater.py
│ ├── test_memory_upload_filtering.py
│ ├── test_model_config.py
│ ├── test_model_factory.py
│ ├── test_patched_minimax.py
│ ├── test_present_file_tool_core_logic.py
│ ├── test_provisioner_kubeconfig.py
│ ├── test_readability.py
│ ├── test_reflection_resolvers.py
│ ├── test_sandbox_tools_security.py
│ ├── test_serialize_message_content.py
│ ├── test_skills_archive_root.py
│ ├── test_skills_loader.py
│ ├── test_skills_router.py
│ ├── test_subagent_executor.py
│ ├── test_subagent_timeout_config.py
│ ├── test_suggestions_router.py
│ ├── test_task_tool_core_logic.py
│ ├── test_thread_data_middleware.py
│ ├── test_title_generation.py
│ ├── test_title_middleware_core_logic.py
│ ├── test_token_usage.py
│ ├── test_tool_error_handling_middleware.py
│ ├── test_tool_search.py
│ ├── test_tracing_config.py
│ ├── test_uploads_middleware_core_logic.py
│ └── test_uploads_router.py
├── config.example.yaml
├── deer-flow.code-workspace
├── docker/
│ ├── docker-compose-dev.yaml
│ ├── docker-compose.yaml
│ ├── nginx/
│ │ ├── nginx.conf
│ │ └── nginx.local.conf
│ └── provisioner/
│ ├── Dockerfile
│ ├── README.md
│ └── app.py
├── docs/
│ ├── CODE_CHANGE_SUMMARY_BY_FILE.md
│ └── SKILL_NAME_CONFLICT_FIX.md
├── extensions_config.example.json
├── frontend/
│ ├── .gitignore
│ ├── .npmrc
│ ├── AGENTS.md
│ ├── CLAUDE.md
│ ├── Dockerfile
│ ├── Makefile
│ ├── README.md
│ ├── components.json
│ ├── eslint.config.js
│ ├── next.config.js
│ ├── package.json
│ ├── pnpm-workspace.yaml
│ ├── postcss.config.js
│ ├── prettier.config.js
│ ├── public/
│ │ └── demo/
│ │ └── threads/
│ │ ├── 21cfea46-34bd-4aa6-9e1f-3009452fbeb9/
│ │ │ └── thread.json
│ │ ├── 3823e443-4e2b-4679-b496-a9506eae462b/
│ │ │ ├── thread.json
│ │ │ └── user-data/
│ │ │ └── outputs/
│ │ │ └── fei-fei-li-podcast-timeline.md
│ │ ├── 4f3e55ee-f853-43db-bfb3-7d1a411f03cb/
│ │ │ └── thread.json
│ │ ├── 5aa47db1-d0cb-4eb9-aea5-3dac1b371c5a/
│ │ │ ├── thread.json
│ │ │ └── user-data/
│ │ │ └── outputs/
│ │ │ └── jiangsu-football/
│ │ │ ├── css/
│ │ │ │ └── style.css
│ │ │ ├── favicon.html
│ │ │ ├── index.html
│ │ │ └── js/
│ │ │ ├── data.js
│ │ │ └── main.js
│ │ ├── 7cfa5f8f-a2f8-47ad-acbd-da7137baf990/
│ │ │ ├── thread.json
│ │ │ └── user-data/
│ │ │ └── outputs/
│ │ │ ├── index.html
│ │ │ ├── script.js
│ │ │ └── style.css
│ │ ├── 7f9dc56c-e49c-4671-a3d2-c492ff4dce0c/
│ │ │ ├── thread.json
│ │ │ └── user-data/
│ │ │ └── outputs/
│ │ │ └── leica-master-photography-article.md
│ │ ├── 90040b36-7eba-4b97-ba89-02c3ad47a8b9/
│ │ │ └── thread.json
│ │ ├── ad76c455-5bf9-4335-8517-fc03834ab828/
│ │ │ ├── thread.json
│ │ │ └── user-data/
│ │ │ ├── outputs/
│ │ │ │ └── titanic_summary.txt
│ │ │ └── uploads/
│ │ │ └── titanic.csv
│ │ ├── b83fbb2a-4e36-4d82-9de0-7b2a02c2092a/
│ │ │ ├── thread.json
│ │ │ └── user-data/
│ │ │ └── outputs/
│ │ │ └── index.html
│ │ ├── c02bb4d5-4202-490e-ae8f-ff4864fc0d2e/
│ │ │ ├── thread.json
│ │ │ └── user-data/
│ │ │ └── outputs/
│ │ │ ├── index.html
│ │ │ ├── script.js
│ │ │ └── styles.css
│ │ ├── d3e5adaf-084c-4dd5-9d29-94f1d6bccd98/
│ │ │ ├── thread.json
│ │ │ └── user-data/
│ │ │ └── outputs/
│ │ │ └── diana_hu_research.md
│ │ ├── f4125791-0128-402a-8ca9-50e0947557e4/
│ │ │ ├── thread.json
│ │ │ └── user-data/
│ │ │ └── outputs/
│ │ │ └── index.html
│ │ └── fe3f7974-1bcb-4a01-a950-79673baafefd/
│ │ ├── thread.json
│ │ └── user-data/
│ │ └── outputs/
│ │ ├── index.html
│ │ └── research_deerflow_20260201.md
│ ├── scripts/
│ │ └── save-demo.js
│ ├── src/
│ │ ├── app/
│ │ │ ├── api/
│ │ │ │ └── auth/
│ │ │ │ └── [...all]/
│ │ │ │ └── route.ts
│ │ │ ├── layout.tsx
│ │ │ ├── mock/
│ │ │ │ └── api/
│ │ │ │ ├── mcp/
│ │ │ │ │ └── config/
│ │ │ │ │ └── route.ts
│ │ │ │ ├── models/
│ │ │ │ │ └── route.ts
│ │ │ │ ├── skills/
│ │ │ │ │ └── route.ts
│ │ │ │ └── threads/
│ │ │ │ ├── [thread_id]/
│ │ │ │ │ ├── artifacts/
│ │ │ │ │ │ └── [[...artifact_path]]/
│ │ │ │ │ │ └── route.ts
│ │ │ │ │ └── history/
│ │ │ │ │ └── route.ts
│ │ │ │ └── search/
│ │ │ │ └── route.ts
│ │ │ ├── page.tsx
│ │ │ └── workspace/
│ │ │ ├── agents/
│ │ │ │ ├── [agent_name]/
│ │ │ │ │ └── chats/
│ │ │ │ │ └── [thread_id]/
│ │ │ │ │ ├── layout.tsx
│ │ │ │ │ └── page.tsx
│ │ │ │ ├── new/
│ │ │ │ │ └── page.tsx
│ │ │ │ └── page.tsx
│ │ │ ├── chats/
│ │ │ │ ├── [thread_id]/
│ │ │ │ │ ├── layout.tsx
│ │ │ │ │ └── page.tsx
│ │ │ │ └── page.tsx
│ │ │ ├── layout.tsx
│ │ │ └── page.tsx
│ │ ├── components/
│ │ │ ├── ai-elements/
│ │ │ │ ├── artifact.tsx
│ │ │ │ ├── canvas.tsx
│ │ │ │ ├── chain-of-thought.tsx
│ │ │ │ ├── checkpoint.tsx
│ │ │ │ ├── code-block.tsx
│ │ │ │ ├── connection.tsx
│ │ │ │ ├── context.tsx
│ │ │ │ ├── controls.tsx
│ │ │ │ ├── conversation.tsx
│ │ │ │ ├── edge.tsx
│ │ │ │ ├── image.tsx
│ │ │ │ ├── loader.tsx
│ │ │ │ ├── message.tsx
│ │ │ │ ├── model-selector.tsx
│ │ │ │ ├── node.tsx
│ │ │ │ ├── open-in-chat.tsx
│ │ │ │ ├── panel.tsx
│ │ │ │ ├── plan.tsx
│ │ │ │ ├── prompt-input.tsx
│ │ │ │ ├── queue.tsx
│ │ │ │ ├── reasoning.tsx
│ │ │ │ ├── shimmer.tsx
│ │ │ │ ├── sources.tsx
│ │ │ │ ├── suggestion.tsx
│ │ │ │ ├── task.tsx
│ │ │ │ ├── toolbar.tsx
│ │ │ │ └── web-preview.tsx
│ │ │ ├── landing/
│ │ │ │ ├── footer.tsx
│ │ │ │ ├── header.tsx
│ │ │ │ ├── hero.tsx
│ │ │ │ ├── progressive-skills-animation.tsx
│ │ │ │ ├── section.tsx
│ │ │ │ └── sections/
│ │ │ │ ├── case-study-section.tsx
│ │ │ │ ├── community-section.tsx
│ │ │ │ ├── sandbox-section.tsx
│ │ │ │ ├── skills-section.tsx
│ │ │ │ └── whats-new-section.tsx
│ │ │ ├── theme-provider.tsx
│ │ │ ├── ui/
│ │ │ │ ├── alert.tsx
│ │ │ │ ├── aurora-text.tsx
│ │ │ │ ├── avatar.tsx
│ │ │ │ ├── badge.tsx
│ │ │ │ ├── breadcrumb.tsx
│ │ │ │ ├── button-group.tsx
│ │ │ │ ├── button.tsx
│ │ │ │ ├── card.tsx
│ │ │ │ ├── carousel.tsx
│ │ │ │ ├── collapsible.tsx
│ │ │ │ ├── command.tsx
│ │ │ │ ├── confetti-button.tsx
│ │ │ │ ├── dialog.tsx
│ │ │ │ ├── dropdown-menu.tsx
│ │ │ │ ├── empty.tsx
│ │ │ │ ├── flickering-grid.tsx
│ │ │ │ ├── galaxy.css
│ │ │ │ ├── galaxy.jsx
│ │ │ │ ├── hover-card.tsx
│ │ │ │ ├── input-group.tsx
│ │ │ │ ├── input.tsx
│ │ │ │ ├── item.tsx
│ │ │ │ ├── magic-bento.css
│ │ │ │ ├── magic-bento.tsx
│ │ │ │ ├── number-ticker.tsx
│ │ │ │ ├── progress.tsx
│ │ │ │ ├── resizable.tsx
│ │ │ │ ├── scroll-area.tsx
│ │ │ │ ├── select.tsx
│ │ │ │ ├── separator.tsx
│ │ │ │ ├── sheet.tsx
│ │ │ │ ├── shine-border.tsx
│ │ │ │ ├── sidebar.tsx
│ │ │ │ ├── skeleton.tsx
│ │ │ │ ├── sonner.tsx
│ │ │ │ ├── spotlight-card.css
│ │ │ │ ├── spotlight-card.tsx
│ │ │ │ ├── switch.tsx
│ │ │ │ ├── tabs.tsx
│ │ │ │ ├── terminal.tsx
│ │ │ │ ├── textarea.tsx
│ │ │ │ ├── toggle-group.tsx
│ │ │ │ ├── toggle.tsx
│ │ │ │ ├── tooltip.tsx
│ │ │ │ └── word-rotate.tsx
│ │ │ └── workspace/
│ │ │ ├── agent-welcome.tsx
│ │ │ ├── agents/
│ │ │ │ ├── agent-card.tsx
│ │ │ │ └── agent-gallery.tsx
│ │ │ ├── artifacts/
│ │ │ │ ├── artifact-file-detail.tsx
│ │ │ │ ├── artifact-file-list.tsx
│ │ │ │ ├── artifact-trigger.tsx
│ │ │ │ ├── context.tsx
│ │ │ │ └── index.ts
│ │ │ ├── chats/
│ │ │ │ ├── chat-box.tsx
│ │ │ │ ├── index.ts
│ │ │ │ ├── use-chat-mode.ts
│ │ │ │ └── use-thread-chat.ts
│ │ │ ├── citations/
│ │ │ │ ├── artifact-link.tsx
│ │ │ │ └── citation-link.tsx
│ │ │ ├── code-editor.tsx
│ │ │ ├── copy-button.tsx
│ │ │ ├── export-trigger.tsx
│ │ │ ├── flip-display.tsx
│ │ │ ├── github-icon.tsx
│ │ │ ├── input-box.tsx
│ │ │ ├── messages/
│ │ │ │ ├── context.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── markdown-content.tsx
│ │ │ │ ├── message-group.tsx
│ │ │ │ ├── message-list-item.tsx
│ │ │ │ ├── message-list.tsx
│ │ │ │ ├── skeleton.tsx
│ │ │ │ └── subtask-card.tsx
│ │ │ ├── mode-hover-guide.tsx
│ │ │ ├── overscroll.tsx
│ │ │ ├── recent-chat-list.tsx
│ │ │ ├── settings/
│ │ │ │ ├── about-content.ts
│ │ │ │ ├── about-settings-page.tsx
│ │ │ │ ├── about.md
│ │ │ │ ├── appearance-settings-page.tsx
│ │ │ │ ├── index.ts
│ │ │ │ ├── memory-settings-page.tsx
│ │ │ │ ├── notification-settings-page.tsx
│ │ │ │ ├── settings-dialog.tsx
│ │ │ │ ├── settings-section.tsx
│ │ │ │ ├── skill-settings-page.tsx
│ │ │ │ └── tool-settings-page.tsx
│ │ │ ├── streaming-indicator.tsx
│ │ │ ├── thread-title.tsx
│ │ │ ├── todo-list.tsx
│ │ │ ├── tooltip.tsx
│ │ │ ├── welcome.tsx
│ │ │ ├── workspace-container.tsx
│ │ │ ├── workspace-header.tsx
│ │ │ ├── workspace-nav-chat-list.tsx
│ │ │ ├── workspace-nav-menu.tsx
│ │ │ └── workspace-sidebar.tsx
│ │ ├── core/
│ │ │ ├── agents/
│ │ │ │ ├── api.ts
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── types.ts
│ │ │ ├── api/
│ │ │ │ ├── api-client.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── stream-mode.test.ts
│ │ │ │ └── stream-mode.ts
│ │ │ ├── artifacts/
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── loader.ts
│ │ │ │ └── utils.ts
│ │ │ ├── config/
│ │ │ │ └── index.ts
│ │ │ ├── i18n/
│ │ │ │ ├── context.tsx
│ │ │ │ ├── cookies.ts
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── locale.ts
│ │ │ │ ├── locales/
│ │ │ │ │ ├── en-US.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ ├── types.ts
│ │ │ │ │ └── zh-CN.ts
│ │ │ │ └── server.ts
│ │ │ ├── mcp/
│ │ │ │ ├── api.ts
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── types.ts
│ │ │ ├── memory/
│ │ │ │ ├── api.ts
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── types.ts
│ │ │ ├── messages/
│ │ │ │ └── utils.ts
│ │ │ ├── models/
│ │ │ │ ├── api.ts
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── types.ts
│ │ │ ├── notification/
│ │ │ │ └── hooks.ts
│ │ │ ├── rehype/
│ │ │ │ └── index.ts
│ │ │ ├── settings/
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── local.ts
│ │ │ ├── skills/
│ │ │ │ ├── api.ts
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── type.ts
│ │ │ ├── streamdown/
│ │ │ │ ├── index.ts
│ │ │ │ └── plugins.ts
│ │ │ ├── tasks/
│ │ │ │ ├── context.tsx
│ │ │ │ ├── index.ts
│ │ │ │ └── types.ts
│ │ │ ├── threads/
│ │ │ │ ├── export.ts
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── types.ts
│ │ │ │ └── utils.ts
│ │ │ ├── todos/
│ │ │ │ ├── index.ts
│ │ │ │ └── types.ts
│ │ │ ├── tools/
│ │ │ │ └── utils.ts
│ │ │ ├── uploads/
│ │ │ │ ├── api.ts
│ │ │ │ ├── hooks.ts
│ │ │ │ └── index.ts
│ │ │ └── utils/
│ │ │ ├── datetime.ts
│ │ │ ├── files.tsx
│ │ │ ├── json.ts
│ │ │ ├── markdown.ts
│ │ │ └── uuid.ts
│ │ ├── env.js
│ │ ├── hooks/
│ │ │ └── use-mobile.ts
│ │ ├── lib/
│ │ │ └── utils.ts
│ │ ├── server/
│ │ │ └── better-auth/
│ │ │ ├── client.ts
│ │ │ ├── config.ts
│ │ │ ├── index.ts
│ │ │ └── server.ts
│ │ ├── styles/
│ │ │ └── globals.css
│ │ └── typings/
│ │ └── md.d.ts
│ └── tsconfig.json
├── scripts/
│ ├── check.py
│ ├── check.sh
│ ├── cleanup-containers.sh
│ ├── config-upgrade.sh
│ ├── configure.py
│ ├── deploy.sh
│ ├── docker.sh
│ ├── export_claude_code_oauth.py
│ ├── serve.sh
│ ├── start-daemon.sh
│ ├── tool-error-degradation-detection.sh
│ └── wait-for-port.sh
└── skills/
└── public/
├── bootstrap/
│ ├── SKILL.md
│ ├── references/
│ │ └── conversation-guide.md
│ └── templates/
│ └── SOUL.template.md
├── chart-visualization/
│ ├── SKILL.md
│ ├── references/
│ │ ├── generate_area_chart.md
│ │ ├── generate_bar_chart.md
│ │ ├── generate_boxplot_chart.md
│ │ ├── generate_column_chart.md
│ │ ├── generate_district_map.md
│ │ ├── generate_dual_axes_chart.md
│ │ ├── generate_fishbone_diagram.md
│ │ ├── generate_flow_diagram.md
│ │ ├── generate_funnel_chart.md
│ │ ├── generate_histogram_chart.md
│ │ ├── generate_line_chart.md
│ │ ├── generate_liquid_chart.md
│ │ ├── generate_mind_map.md
│ │ ├── generate_network_graph.md
│ │ ├── generate_organization_chart.md
│ │ ├── generate_path_map.md
│ │ ├── generate_pie_chart.md
│ │ ├── generate_pin_map.md
│ │ ├── generate_radar_chart.md
│ │ ├── generate_sankey_chart.md
│ │ ├── generate_scatter_chart.md
│ │ ├── generate_spreadsheet.md
│ │ ├── generate_treemap_chart.md
│ │ ├── generate_venn_chart.md
│ │ ├── generate_violin_chart.md
│ │ └── generate_word_cloud_chart.md
│ └── scripts/
│ └── generate.js
├── claude-to-deerflow/
│ ├── SKILL.md
│ └── scripts/
│ ├── chat.sh
│ └── status.sh
├── consulting-analysis/
│ └── SKILL.md
├── data-analysis/
│ ├── SKILL.md
│ └── scripts/
│ └── analyze.py
├── deep-research/
│ └── SKILL.md
├── find-skills/
│ ├── SKILL.md
│ └── scripts/
│ └── install-skill.sh
├── frontend-design/
│ ├── LICENSE.txt
│ └── SKILL.md
├── github-deep-research/
│ ├── SKILL.md
│ ├── assets/
│ │ └── report_template.md
│ └── scripts/
│ └── github_api.py
├── image-generation/
│ ├── SKILL.md
│ ├── scripts/
│ │ └── generate.py
│ └── templates/
│ └── doraemon.md
├── podcast-generation/
│ ├── SKILL.md
│ ├── scripts/
│ │ └── generate.py
│ └── templates/
│ └── tech-explainer.md
├── ppt-generation/
│ ├── SKILL.md
│ └── scripts/
│ └── generate.py
├── skill-creator/
│ ├── LICENSE.txt
│ ├── SKILL.md
│ ├── agents/
│ │ ├── analyzer.md
│ │ ├── comparator.md
│ │ └── grader.md
│ ├── assets/
│ │ └── eval_review.html
│ ├── eval-viewer/
│ │ ├── generate_review.py
│ │ └── viewer.html
│ ├── references/
│ │ ├── output-patterns.md
│ │ ├── schemas.md
│ │ └── workflows.md
│ └── scripts/
│ ├── aggregate_benchmark.py
│ ├── generate_report.py
│ ├── improve_description.py
│ ├── init_skill.py
│ ├── package_skill.py
│ ├── quick_validate.py
│ ├── run_eval.py
│ ├── run_loop.py
│ └── utils.py
├── surprise-me/
│ └── SKILL.md
├── vercel-deploy-claimable/
│ ├── SKILL.md
│ └── scripts/
│ └── deploy.sh
├── video-generation/
│ ├── SKILL.md
│ └── scripts/
│ └── generate.py
└── web-design-guidelines/
└── SKILL.md
================================================
FILE CONTENTS
================================================
================================================
FILE: .dockerignore
================================================
.env
Dockerfile
.dockerignore
.git
.gitignore
docker/
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
.venv/
# Web
node_modules
npm-debug.log
.next
# IDE
.idea/
.vscode/
*.swp
*.swo
# OS
.DS_Store
Thumbs.db
# Project specific
conf.yaml
web/
docs/
examples/
assets/
tests/
*.log
# Exclude directories not needed in Docker context
# Frontend build only needs frontend/
# Backend build only needs backend/
scripts/
logs/
docker/
skills/
frontend/.next
frontend/node_modules
backend/.venv
backend/htmlcov
backend/.coverage
*.md
!README.md
!frontend/README.md
!backend/README.md
================================================
FILE: .gitattributes
================================================
# Normalize line endings to LF for all text files
* text=auto eol=lf
# Shell scripts and makefiles must always use LF
*.sh text eol=lf
Makefile text eol=lf
**/Makefile text eol=lf
# Common config/source files
*.yml text eol=lf
*.yaml text eol=lf
*.toml text eol=lf
*.json text eol=lf
*.md text eol=lf
*.py text eol=lf
*.ts text eol=lf
*.tsx text eol=lf
*.js text eol=lf
*.jsx text eol=lf
*.css text eol=lf
*.scss text eol=lf
*.html text eol=lf
*.env text eol=lf
# Windows scripts
*.bat text eol=crlf
*.cmd text eol=crlf
# Binary assets
*.png binary
*.jpg binary
*.jpeg binary
*.gif binary
*.webp binary
*.ico binary
*.pdf binary
*.zip binary
*.tar binary
*.gz binary
*.mp4 binary
*.mov binary
*.woff binary
*.woff2 binary
================================================
FILE: .github/ISSUE_TEMPLATE/runtime-information.yml
================================================
name: Runtime Information
description: Report runtime/environment details to help reproduce an issue.
title: "[runtime] "
labels:
- needs-triage
body:
- type: markdown
attributes:
value: |
Thanks for sharing runtime details.
Complete this form so maintainers can quickly reproduce and diagnose the problem.
- type: input
id: summary
attributes:
label: Problem summary
description: Short summary of the issue.
placeholder: e.g. make dev fails to start gateway service
validations:
required: true
- type: textarea
id: expected
attributes:
label: Expected behavior
placeholder: What did you expect to happen?
validations:
required: true
- type: textarea
id: actual
attributes:
label: Actual behavior
placeholder: What happened instead? Include key error lines.
validations:
required: true
- type: dropdown
id: os
attributes:
label: Operating system
options:
- macOS
- Linux
- Windows
- Other
validations:
required: true
- type: input
id: platform_details
attributes:
label: Platform details
description: Add architecture and shell if relevant.
placeholder: e.g. arm64, zsh
- type: input
id: python_version
attributes:
label: Python version
placeholder: e.g. Python 3.12.9
- type: input
id: node_version
attributes:
label: Node.js version
placeholder: e.g. v23.11.0
- type: input
id: pnpm_version
attributes:
label: pnpm version
placeholder: e.g. 10.26.2
- type: input
id: uv_version
attributes:
label: uv version
placeholder: e.g. 0.7.20
- type: dropdown
id: run_mode
attributes:
label: How are you running DeerFlow?
options:
- Local (make dev)
- Docker (make docker-dev)
- CI
- Other
validations:
required: true
- type: textarea
id: reproduce
attributes:
label: Reproduction steps
description: Provide exact commands and sequence.
placeholder: |
1. make check
2. make install
3. make dev
4. ...
validations:
required: true
- type: textarea
id: logs
attributes:
label: Relevant logs
description: Paste key lines from logs (for example logs/gateway.log, logs/frontend.log).
render: shell
validations:
required: true
- type: textarea
id: git_info
attributes:
label: Git state
description: Share output of git branch and latest commit SHA.
placeholder: |
branch: feature/my-branch
commit: abcdef1
- type: textarea
id: additional
attributes:
label: Additional context
description: Add anything else that might help triage.
================================================
FILE: .github/copilot-instructions.md
================================================
# Copilot Onboarding Instructions for DeerFlow
Use this file as the default operating guide for this repository. Follow it first, and only search the codebase when this file is incomplete or incorrect.
## 1) Repository Summary
DeerFlow is a full-stack "super agent harness".
- Backend: Python 3.12, LangGraph + FastAPI gateway, sandbox/tool system, memory, MCP integration.
- Frontend: Next.js 16 + React 19 + TypeScript + pnpm.
- Local dev entrypoint: root `Makefile` starts backend + frontend + nginx on `http://localhost:2026`.
- Docker dev entrypoint: `make docker-*` (mode-aware provisioner startup from `config.yaml`).
Current repo footprint is medium-large (backend service, frontend app, docker stack, skills library, docs).
## 2) Runtime and Toolchain Requirements
Validated in this repo on macOS:
- Node.js `>=22` (validated with Node `23.11.0`)
- pnpm (the repo's lockfile was generated by pnpm 10; validated with pnpm `10.26.2` and `10.15.0`)
- Python `>=3.12` (CI uses `3.12`)
- `uv` (validated with `0.7.20`)
- `nginx` (required for `make dev` unified local endpoint)
Always run from repo root unless a command explicitly says otherwise.
## 3) Build/Test/Lint/Run - Verified Command Sequences
These were executed and validated in this repository.
### A. Bootstrap and install
1. Check prerequisites:
```bash
make check
```
Observed: passes when required tools are installed.
2. Install dependencies (recommended order: backend then frontend, as implemented by `make install`):
```bash
make install
```
### B. Backend CI-equivalent validation
Run from `backend/`:
```bash
make lint
make test
```
Validated results:
- `make lint`: pass (`ruff check .`)
- `make test`: pass (`277 passed, 15 warnings in ~76.6s`)
CI parity:
- `.github/workflows/backend-unit-tests.yml` runs on pull requests.
- CI executes `uv sync --group dev`, then `make lint`, then `make test` in `backend/`.
### C. Frontend validation
Run from `frontend/`.
Recommended reliable sequence:
```bash
pnpm lint
pnpm typecheck
BETTER_AUTH_SECRET=local-dev-secret pnpm build
```
Observed failure modes and workarounds:
- `pnpm build` fails without `BETTER_AUTH_SECRET` in production-mode env validation.
- Workaround: set `BETTER_AUTH_SECRET` (best) or set `SKIP_ENV_VALIDATION=1`.
- Even with `SKIP_ENV_VALIDATION=1`, Better Auth can still warn/error in logs about default secret; prefer setting a real non-default secret.
- `pnpm check` currently fails (`next lint` invocation is incompatible here and resolves to an invalid directory). Do not rely on `pnpm check`; run `pnpm lint` and `pnpm typecheck` explicitly.
### D. Run locally (all services)
From root:
```bash
make dev
```
Behavior:
- Stops existing local services first.
- Starts LangGraph (`2024`), Gateway (`8001`), Frontend (`3000`), nginx (`2026`).
- Unified app endpoint: `http://localhost:2026`.
- Logs: `logs/langgraph.log`, `logs/gateway.log`, `logs/frontend.log`, `logs/nginx.log`.
Stop services:
```bash
make stop
```
If tool sessions/timeouts interrupt `make dev`, run `make stop` again to ensure cleanup.
### E. Config bootstrap
From root:
```bash
make config
```
Important behavior:
- This intentionally aborts if `config.yaml` (or `config.yml`/`configure.yml`) already exists.
- Use `make config` only for first-time setup in a clean clone.
## 4) Command Order That Minimizes Failures
Use this exact order for local code changes:
1. `make check`
2. `make install` (if frontend fails with proxy errors, rerun frontend install with proxy vars unset)
3. Backend checks: `cd backend && make lint && make test`
4. Frontend checks: `cd frontend && pnpm lint && pnpm typecheck`
5. Frontend build (if UI changes or release-sensitive changes): `BETTER_AUTH_SECRET=... pnpm build`
Always run backend lint/tests before opening PRs because that is what CI enforces.
## 5) Project Layout and Architecture (High-Value Paths)
Root-level orchestration and config:
- `Makefile` - main local/dev/docker command entrypoints
- `config.example.yaml` - primary app config template
- `config.yaml` - local active config (gitignored)
- `docker/docker-compose-dev.yaml` - Docker dev topology
- `.github/workflows/backend-unit-tests.yml` - PR validation workflow
Backend core:
- `backend/packages/harness/deerflow/agents/` - lead agent, middleware chain, memory
- `backend/app/gateway/` - FastAPI gateway API
- `backend/packages/harness/deerflow/sandbox/` - sandbox provider + tool wrappers
- `backend/packages/harness/deerflow/subagents/` - subagent registry/execution
- `backend/packages/harness/deerflow/mcp/` - MCP integration
- `backend/langgraph.json` - graph entrypoint (`deerflow.agents:make_lead_agent`)
- `backend/pyproject.toml` - Python deps and `requires-python`
- `backend/ruff.toml` - lint/format policy
- `backend/tests/` - backend unit and integration-like tests
Frontend core:
- `frontend/src/app/` - Next.js routes/pages
- `frontend/src/components/` - UI components
- `frontend/src/core/` - app logic (threads, tools, API, models)
- `frontend/src/env.js` - env schema/validation (critical for build behavior)
- `frontend/package.json` - scripts/deps
- `frontend/eslint.config.js` - lint rules
- `frontend/tsconfig.json` - TS config
Skills and assets:
- `skills/public/` - built-in skill packs loaded by agent runtime
## 6) Pre-Checkin / Validation Expectations
Before submitting changes, run at minimum:
- Backend: `cd backend && make lint && make test`
- Frontend (if touched): `cd frontend && pnpm lint && pnpm typecheck`
- Frontend build when changing env/auth/routing/build-sensitive files: `BETTER_AUTH_SECRET=... pnpm build`
If touching orchestration/config (`Makefile`, `docker/*`, `config*.yaml`), also run `make dev` and verify the four services start.
## 7) Non-Obvious Dependencies and Gotchas
- Proxy env vars can silently break frontend network operations (`pnpm install`/registry access).
- `BETTER_AUTH_SECRET` is effectively required for reliable frontend production build validation.
- Next.js may warn about multiple lockfiles and workspace root inference; this is currently a warning, not a build blocker.
- `make config` intentionally aborts when a config file already exists; it is only for first-time setup, not for re-runs.
- `make dev` includes process cleanup and can emit shutdown logs/noise if interrupted; this is expected.
## 8) Root Inventory (quick reference)
Important root entries:
- `.github/`
- `backend/`
- `frontend/`
- `docker/`
- `skills/`
- `scripts/`
- `docs/`
- `README.md`
- `CONTRIBUTING.md`
- `Makefile`
- `config.example.yaml`
- `extensions_config.example.json`
## 9) Instruction Priority
Trust this onboarding guide first.
Only do broad repo searches (`grep/find/code search`) when:
- you need file-level implementation details not listed here,
- a command here fails and you need updated replacement behavior,
- or CI/workflow definitions have changed since this file was written.
================================================
FILE: .github/workflows/backend-unit-tests.yml
================================================
name: Unit Tests
on:
push:
branches: [ 'main' ]
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
concurrency:
group: unit-tests-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
backend-unit-tests:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- name: Checkout
uses: actions/checkout@v6
- name: Set up Python
uses: actions/setup-python@v6
with:
python-version: '3.12'
- name: Install uv
uses: astral-sh/setup-uv@v7
- name: Install backend dependencies
working-directory: backend
run: uv sync --group dev
- name: Lint backend
working-directory: backend
run: make lint
- name: Run unit tests of backend
working-directory: backend
run: make test
================================================
FILE: .gitignore
================================================
# DeerFlow docker image cache
docker/.cache/
# OS generated files
.DS_Store
*.local
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db
# Python cache
__pycache__/
*.pyc
*.pyo
# Virtual environments
.venv
venv/
# Environment variables
.env
# Configuration files
config.yaml
mcp_config.json
extensions_config.json
# IDE
.idea/
.vscode/
# Coverage report
coverage.xml
coverage/
.deer-flow/
.claude/
skills/custom/*
logs/
log/
# Local git hooks (keep only on this machine, do not push)
.githooks/
# pnpm
.pnpm-store
sandbox_image_cache.tar
# ignore the legacy `web` folder
web/
# Deployment artifacts
backend/Dockerfile.langgraph
config.yaml.bak
================================================
FILE: CONTRIBUTING.md
================================================
# Contributing to DeerFlow
Thank you for your interest in contributing to DeerFlow! This guide will help you set up your development environment and understand our development workflow.
## Development Environment Setup
We offer two development environments. **Docker is recommended** for the most consistent and hassle-free experience.
### Option 1: Docker Development (Recommended)
Docker provides a consistent, isolated environment with all dependencies pre-configured. No need to install Node.js, Python, or nginx on your local machine.
#### Prerequisites
- Docker Desktop or Docker Engine
- pnpm (for caching optimization)
#### Setup Steps
1. **Configure the application**:
```bash
# Copy example configuration
cp config.example.yaml config.yaml
# Set your API keys
export OPENAI_API_KEY="your-key-here"
# or edit config.yaml directly
```
2. **Initialize Docker environment** (first time only):
```bash
make docker-init
```
This will:
- Build Docker images
- Install frontend dependencies (pnpm)
- Install backend dependencies (uv)
- Share pnpm cache with host for faster builds
3. **Start development services**:
```bash
make docker-start
```
`make docker-start` reads `config.yaml` and starts the `provisioner` service only when the configured sandbox mode is provisioner/Kubernetes.
All services will start with hot-reload enabled:
- Frontend changes are automatically reloaded
- Backend changes trigger automatic restart
- LangGraph server supports hot-reload
4. **Access the application**:
- Web Interface: http://localhost:2026
- API Gateway: http://localhost:2026/api/*
- LangGraph: http://localhost:2026/api/langgraph/*
#### Docker Commands
```bash
# Build the custom k3s image (with pre-cached sandbox image)
make docker-init
# Start Docker services (mode-aware, localhost:2026)
make docker-start
# Stop Docker development services
make docker-stop
# View Docker development logs
make docker-logs
# View Docker frontend logs
make docker-logs-frontend
# View Docker gateway logs
make docker-logs-gateway
```
#### Docker Architecture
```
Host Machine
↓
Docker Compose (deer-flow-dev)
├→ nginx (port 2026) ← Reverse proxy
├→ web (port 3000) ← Frontend with hot-reload
├→ api (port 8001) ← Gateway API with hot-reload
├→ langgraph (port 2024) ← LangGraph server with hot-reload
└→ provisioner (optional, port 8002) ← Started only in provisioner/K8s sandbox mode
```
**Benefits of Docker Development**:
- ✅ Consistent environment across different machines
- ✅ No need to install Node.js, Python, or nginx locally
- ✅ Isolated dependencies and services
- ✅ Easy cleanup and reset
- ✅ Hot-reload for all services
- ✅ Production-like environment
### Option 2: Local Development
If you prefer to run services directly on your machine:
#### Prerequisites
Check that you have all required tools installed:
```bash
make check
```
Required tools:
- Node.js 22+
- pnpm
- uv (Python package manager)
- nginx
#### Setup Steps
1. **Configure the application** (same as Docker setup above)
2. **Install dependencies**:
```bash
make install
```
3. **Run development server** (starts all services with nginx):
```bash
make dev
```
4. **Access the application**:
- Web Interface: http://localhost:2026
- All API requests are automatically proxied through nginx
#### Manual Service Control
If you need to start services individually:
1. **Start backend services**:
```bash
# Terminal 1: Start LangGraph Server (port 2024)
cd backend
make dev
# Terminal 2: Start Gateway API (port 8001)
cd backend
make gateway
# Terminal 3: Start Frontend (port 3000)
cd frontend
pnpm dev
```
2. **Start nginx**:
```bash
make nginx
# or directly: nginx -c $(pwd)/docker/nginx/nginx.local.conf -g 'daemon off;'
```
3. **Access the application**:
- Web Interface: http://localhost:2026
#### Nginx Configuration
The nginx configuration provides:
- Unified entry point on port 2026
- Routes `/api/langgraph/*` to LangGraph Server (2024)
- Routes other `/api/*` endpoints to Gateway API (8001)
- Routes non-API requests to Frontend (3000)
- Centralized CORS handling
- SSE/streaming support for real-time agent responses
- Optimized timeouts for long-running operations
## Project Structure
```
deer-flow/
├── config.example.yaml # Configuration template
├── extensions_config.example.json # MCP and Skills configuration template
├── Makefile # Build and development commands
├── scripts/
│ └── docker.sh # Docker management script
├── docker/
│ ├── docker-compose-dev.yaml # Docker Compose configuration
│ └── nginx/
│ ├── nginx.conf # Nginx config for Docker
│ └── nginx.local.conf # Nginx config for local dev
├── backend/ # Backend application
│ ├── src/
│ │ ├── gateway/ # Gateway API (port 8001)
│ │ ├── agents/ # LangGraph agents (port 2024)
│ │ ├── mcp/ # Model Context Protocol integration
│ │ ├── skills/ # Skills system
│ │ └── sandbox/ # Sandbox execution
│ ├── docs/ # Backend documentation
│ └── Makefile # Backend commands
├── frontend/ # Frontend application
│ └── Makefile # Frontend commands
└── skills/ # Agent skills
├── public/ # Public skills
└── custom/ # Custom skills
```
## Architecture
```
Browser
↓
Nginx (port 2026) ← Unified entry point
├→ Frontend (port 3000) ← / (non-API requests)
├→ Gateway API (port 8001) ← /api/models, /api/mcp, /api/skills, /api/threads/*/artifacts
└→ LangGraph Server (port 2024) ← /api/langgraph/* (agent interactions)
```
## Development Workflow
1. **Create a feature branch**:
```bash
git checkout -b feature/your-feature-name
```
2. **Make your changes** with hot-reload enabled
3. **Test your changes** thoroughly
4. **Commit your changes**:
```bash
git add .
git commit -m "feat: description of your changes"
```
5. **Push and create a Pull Request**:
```bash
git push origin feature/your-feature-name
```
## Testing
```bash
# Backend tests
cd backend
uv run pytest
# Frontend tests
cd frontend
pnpm test
```
### PR Regression Checks
Every pull request runs the backend regression workflow at [.github/workflows/backend-unit-tests.yml](.github/workflows/backend-unit-tests.yml), including:
- `tests/test_provisioner_kubeconfig.py`
- `tests/test_docker_sandbox_mode_detection.py`
## Code Style
- **Backend (Python)**: We use `ruff` for linting and formatting
- **Frontend (TypeScript)**: We use ESLint and Prettier
## Documentation
- [Configuration Guide](backend/docs/CONFIGURATION.md) - Setup and configuration
- [Architecture Overview](backend/CLAUDE.md) - Technical architecture
- [MCP Server Guide](backend/docs/MCP_SERVER.md) - Model Context Protocol configuration
## Need Help?
- Check existing [Issues](https://github.com/bytedance/deer-flow/issues)
- Read the [Documentation](backend/docs/)
- Ask questions in [Discussions](https://github.com/bytedance/deer-flow/discussions)
## License
By contributing to DeerFlow, you agree that your contributions will be licensed under the [MIT License](./LICENSE).
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
Copyright (c) 2025-2026 DeerFlow Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: Makefile
================================================
# DeerFlow - Unified Development Environment
# Declare every command target as phony so make always runs the recipe even if
# a file or directory with the same name exists (e.g. a local `logs/` dir).
# Fix: `setup-sandbox` is a real target below (see "Pre-pull sandbox Docker
# image") but was missing from this list.
.PHONY: help config config-upgrade check install setup-sandbox dev dev-daemon start stop up down clean docker-init docker-start docker-stop docker-logs docker-logs-frontend docker-logs-gateway
PYTHON ?= python
# Default target: print the list of available make commands with one-line
# descriptions. Kept in sync with the targets defined below.
help:
@echo "DeerFlow Development Commands:"
@echo " make config - Generate local config files (aborts if config already exists)"
@echo " make config-upgrade - Merge new fields from config.example.yaml into config.yaml"
@echo " make check - Check if all required tools are installed"
@echo " make install - Install all dependencies (frontend + backend)"
@echo " make setup-sandbox - Pre-pull sandbox container image (recommended)"
@echo " make dev - Start all services in development mode (with hot-reloading)"
@echo " make dev-daemon - Start all services in background (daemon mode)"
@echo " make start - Start all services in production mode (optimized, no hot-reloading)"
@echo " make stop - Stop all running services"
@echo " make clean - Clean up processes and temporary files"
@echo ""
@echo "Docker Production Commands:"
@echo " make up - Build and start production Docker services (localhost:2026)"
@echo " make down - Stop and remove production Docker containers"
@echo ""
@echo "Docker Development Commands:"
@echo " make docker-init - Pull the sandbox image"
@echo " make docker-start - Start Docker services (mode-aware from config.yaml, localhost:2026)"
@echo " make docker-stop - Stop Docker development services"
@echo " make docker-logs - View Docker development logs"
@echo " make docker-logs-frontend - View Docker frontend logs"
@echo " make docker-logs-gateway - View Docker gateway logs"
# Generate local config files from the example templates
# (scripts/configure.py aborts if config already exists).
config:
@$(PYTHON) ./scripts/configure.py
# Merge newly added fields from config.example.yaml into an existing config.yaml
config-upgrade:
@./scripts/config-upgrade.sh
# Check required tools
check:
@$(PYTHON) ./scripts/check.py
# Install all dependencies
# Backend deps via uv, frontend deps via pnpm; finishes by printing an
# optional follow-up hint about pre-pulling the sandbox image.
install:
@echo "Installing backend dependencies..."
@cd backend && uv sync
@echo "Installing frontend dependencies..."
@cd frontend && pnpm install
@echo "✓ All dependencies installed"
@echo ""
@echo "=========================================="
@echo " Optional: Pre-pull Sandbox Image"
@echo "=========================================="
@echo ""
@echo "If you plan to use Docker/Container-based sandbox, you can pre-pull the image:"
@echo " make setup-sandbox"
@echo ""
# Pre-pull sandbox Docker image (optional but recommended)
# Resolves the sandbox image name from config.yaml (falling back to the
# default all-in-one-sandbox image when none is configured), then on macOS
# tries Apple Container first and finally pulls via Docker. The recipe only
# fails hard when neither Docker nor Apple Container is available; a failed
# pull by itself is tolerated (local sandbox mode does not need the image).
setup-sandbox:
@echo "=========================================="
@echo " Pre-pulling Sandbox Container Image"
@echo "=========================================="
@echo ""
@IMAGE=$$(grep -A 20 "# sandbox:" config.yaml 2>/dev/null | grep "image:" | awk '{print $$2}' | head -1); \
if [ -z "$$IMAGE" ]; then \
IMAGE="enterprise-public-cn-beijing.cr.volces.com/vefaas-public/all-in-one-sandbox:latest"; \
echo "Using default image: $$IMAGE"; \
else \
echo "Using configured image: $$IMAGE"; \
fi; \
echo ""; \
if command -v container >/dev/null 2>&1 && [ "$$(uname)" = "Darwin" ]; then \
echo "Detected Apple Container on macOS, pulling image..."; \
container pull "$$IMAGE" || echo "⚠ Apple Container pull failed, will try Docker"; \
fi; \
if command -v docker >/dev/null 2>&1; then \
echo "Pulling image using Docker..."; \
if docker pull "$$IMAGE"; then \
echo ""; \
echo "✓ Sandbox image pulled successfully"; \
else \
echo ""; \
echo "⚠ Failed to pull sandbox image (this is OK for local sandbox mode)"; \
fi; \
else \
echo "✗ Neither Docker nor Apple Container is available"; \
echo " Please install Docker: https://docs.docker.com/get-docker/"; \
exit 1; \
fi
# Start all services in development mode (with hot-reloading)
# Requires a valid config.yaml (run `make config` first); serves the unified
# entry point at http://localhost:2026.
dev:
@./scripts/serve.sh --dev
# Start all services in production mode (with optimizations)
start:
@./scripts/serve.sh --prod
# Start all services in daemon mode (background)
dev-daemon:
@./scripts/start-daemon.sh
# Stop all services
# Kills the dev/prod server processes by command-line pattern, asks nginx to
# quit gracefully, force-kills any nginx stragglers after a grace period, and
# finally cleans up sandbox containers. Every step is best-effort (`-` prefix
# plus `|| true`) so a missing process never aborts the target.
# Fix: removed a duplicated `pkill -f "next-server"` line.
stop:
@echo "Stopping all services..."
@-pkill -f "langgraph dev" 2>/dev/null || true
@-pkill -f "uvicorn app.gateway.app:app" 2>/dev/null || true
@-pkill -f "next dev" 2>/dev/null || true
@-pkill -f "next start" 2>/dev/null || true
@-pkill -f "next-server" 2>/dev/null || true
@-nginx -c $(PWD)/docker/nginx/nginx.local.conf -p $(PWD) -s quit 2>/dev/null || true
@sleep 1
@-pkill -9 nginx 2>/dev/null || true
@echo "Cleaning up sandbox containers..."
@-./scripts/cleanup-containers.sh deer-flow-sandbox 2>/dev/null || true
@echo "✓ All services stopped"
# Clean up
# Stops all services first (depends on `stop`), then removes local runtime
# state (backend/.deer-flow, backend/.langgraph_api) and log files.
clean: stop
@echo "Cleaning up..."
@-rm -rf backend/.deer-flow 2>/dev/null || true
@-rm -rf backend/.langgraph_api 2>/dev/null || true
@-rm -rf logs/*.log 2>/dev/null || true
@echo "✓ Cleanup complete"
# ==========================================
# Docker Development Commands
# ==========================================
# All targets delegate to scripts/docker.sh with the matching subcommand.
# Initialize Docker containers and install dependencies
docker-init:
@./scripts/docker.sh init
# Start Docker development environment
docker-start:
@./scripts/docker.sh start
# Stop Docker development environment
docker-stop:
@./scripts/docker.sh stop
# View Docker development logs
docker-logs:
@./scripts/docker.sh logs
# View Docker frontend logs
docker-logs-frontend:
@./scripts/docker.sh logs --frontend
# View Docker gateway logs
docker-logs-gateway:
@./scripts/docker.sh logs --gateway
# ==========================================
# Production Docker Commands
# ==========================================
# Build and start production services (serves at localhost:2026)
up:
@./scripts/deploy.sh
# Stop and remove production containers
down:
@./scripts/deploy.sh down
================================================
FILE: README.md
================================================
# 🦌 DeerFlow - 2.0
English | [中文](./README_zh.md) | [日本語](./README_ja.md)
[](./backend/pyproject.toml)
[](./Makefile)
[](./LICENSE)
<a href="https://trendshift.io/repositories/14699" target="_blank"><img src="https://trendshift.io/api/badge/repositories/14699" alt="bytedance%2Fdeer-flow | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
> On February 28th, 2026, DeerFlow claimed the 🏆 #1 spot on GitHub Trending following the launch of version 2. Thanks a million to our incredible community — you made this happen! 💪🔥
DeerFlow (**D**eep **E**xploration and **E**fficient **R**esearch **Flow**) is an open-source **super agent harness** that orchestrates **sub-agents**, **memory**, and **sandboxes** to do almost anything — powered by **extensible skills**.
https://github.com/user-attachments/assets/a8bcadc4-e040-4cf2-8fda-dd768b999c18
> [!NOTE]
> **DeerFlow 2.0 is a ground-up rewrite.** It shares no code with v1. If you're looking for the original Deep Research framework, it's maintained on the [`1.x` branch](https://github.com/bytedance/deer-flow/tree/main-1.x) — contributions there are still welcome. Active development has moved to 2.0.
## Official Website
[<img width="2880" height="1600" alt="image" src="https://github.com/user-attachments/assets/a598c49f-3b2f-41ea-a052-05e21349188a" />](https://deerflow.tech)
Learn more and see **real demos** on our [**official website**](https://deerflow.tech).
## Coding Plan from ByteDance Volcengine
<img width="4808" height="2400" alt="英文方舟" src="https://github.com/user-attachments/assets/2ecc7b9d-50be-4185-b1f7-5542d222fb2d" />
- We strongly recommend using Doubao-Seed-2.0-Code, DeepSeek v3.2 and Kimi 2.5 to run DeerFlow
- [Learn more](https://www.byteplus.com/en/activity/codingplan?utm_campaign=deer_flow&utm_content=deer_flow&utm_medium=devrel&utm_source=OWO&utm_term=deer_flow)
- [中国大陆地区的开发者请点击这里](https://www.volcengine.com/activity/codingplan?utm_campaign=deer_flow&utm_content=deer_flow&utm_medium=devrel&utm_source=OWO&utm_term=deer_flow)
## InfoQuest
DeerFlow now integrates [InfoQuest (free online experience available)](https://docs.byteplus.com/en/docs/InfoQuest/What_is_Info_Quest), the intelligent search and crawling toolset independently developed by BytePlus.
<a href="https://docs.byteplus.com/en/docs/InfoQuest/What_is_Info_Quest" target="_blank">
<img
src="https://sf16-sg.tiktokcdn.com/obj/eden-sg/hubseh7bsbps/20251208-160108.png" alt="InfoQuest_banner"
/>
</a>
---
## Table of Contents
- [🦌 DeerFlow - 2.0](#-deerflow---20)
- [Official Website](#official-website)
- [InfoQuest](#infoquest)
- [Table of Contents](#table-of-contents)
- [Quick Start](#quick-start)
- [Configuration](#configuration)
- [Running the Application](#running-the-application)
- [Option 1: Docker (Recommended)](#option-1-docker-recommended)
- [Option 2: Local Development](#option-2-local-development)
- [Advanced](#advanced)
- [Sandbox Mode](#sandbox-mode)
- [MCP Server](#mcp-server)
- [IM Channels](#im-channels)
- [From Deep Research to Super Agent Harness](#from-deep-research-to-super-agent-harness)
- [Core Features](#core-features)
- [Skills \& Tools](#skills--tools)
- [Claude Code Integration](#claude-code-integration)
- [Sub-Agents](#sub-agents)
- [Sandbox \& File System](#sandbox--file-system)
- [Context Engineering](#context-engineering)
- [Long-Term Memory](#long-term-memory)
- [Recommended Models](#recommended-models)
- [Embedded Python Client](#embedded-python-client)
- [Documentation](#documentation)
- [Contributing](#contributing)
- [License](#license)
- [Acknowledgments](#acknowledgments)
- [Key Contributors](#key-contributors)
- [Star History](#star-history)
## Quick Start
### Configuration
1. **Clone the DeerFlow repository**
```bash
git clone https://github.com/bytedance/deer-flow.git
cd deer-flow
```
2. **Generate local configuration files**
From the project root directory (`deer-flow/`), run:
```bash
make config
```
This command creates local configuration files based on the provided example templates.
3. **Configure your preferred model(s)**
Edit `config.yaml` and define at least one model:
```yaml
models:
- name: gpt-4 # Internal identifier
display_name: GPT-4 # Human-readable name
use: langchain_openai:ChatOpenAI # LangChain class path
model: gpt-4 # Model identifier for API
api_key: $OPENAI_API_KEY # API key (recommended: use env var)
max_tokens: 4096 # Maximum tokens per request
temperature: 0.7 # Sampling temperature
- name: openrouter-gemini-2.5-flash
display_name: Gemini 2.5 Flash (OpenRouter)
use: langchain_openai:ChatOpenAI
model: google/gemini-2.5-flash-preview
api_key: $OPENAI_API_KEY # OpenRouter still uses the OpenAI-compatible field name here
base_url: https://openrouter.ai/api/v1
- name: gpt-5-responses
display_name: GPT-5 (Responses API)
use: langchain_openai:ChatOpenAI
model: gpt-5
api_key: $OPENAI_API_KEY
use_responses_api: true
output_version: responses/v1
```
OpenRouter and similar OpenAI-compatible gateways should be configured with `langchain_openai:ChatOpenAI` plus `base_url`. If you prefer a provider-specific environment variable name, point `api_key` at that variable explicitly (for example `api_key: $OPENROUTER_API_KEY`).
To route OpenAI models through `/v1/responses`, keep using `langchain_openai:ChatOpenAI` and set `use_responses_api: true` with `output_version: responses/v1`.
CLI-backed provider examples:
```yaml
models:
- name: gpt-5.4
display_name: GPT-5.4 (Codex CLI)
use: deerflow.models.openai_codex_provider:CodexChatModel
model: gpt-5.4
supports_thinking: true
supports_reasoning_effort: true
- name: claude-sonnet-4.6
display_name: Claude Sonnet 4.6 (Claude Code OAuth)
use: deerflow.models.claude_provider:ClaudeChatModel
model: claude-sonnet-4-6
max_tokens: 4096
supports_thinking: true
```
- Codex CLI reads `~/.codex/auth.json`
- The Codex Responses endpoint currently rejects `max_tokens` and `max_output_tokens`, so `CodexChatModel` does not expose a request-level token cap
- Claude Code accepts `CLAUDE_CODE_OAUTH_TOKEN`, `ANTHROPIC_AUTH_TOKEN`, `CLAUDE_CODE_OAUTH_TOKEN_FILE_DESCRIPTOR`, `CLAUDE_CODE_CREDENTIALS_PATH`, or plaintext `~/.claude/.credentials.json`
- On macOS, DeerFlow does not probe Keychain automatically. Export Claude Code auth explicitly if needed:
```bash
eval "$(python3 scripts/export_claude_code_oauth.py --print-export)"
```
4. **Set API keys for your configured model(s)**
Choose one of the following methods:
- Option A: Edit the `.env` file in the project root (Recommended)
```bash
TAVILY_API_KEY=your-tavily-api-key
OPENAI_API_KEY=your-openai-api-key
# OpenRouter also uses OPENAI_API_KEY when your config uses langchain_openai:ChatOpenAI + base_url.
# Add other provider keys as needed
INFOQUEST_API_KEY=your-infoquest-api-key
```
- Option B: Export environment variables in your shell
```bash
export OPENAI_API_KEY=your-openai-api-key
```
For CLI-backed providers:
- Codex CLI: `~/.codex/auth.json`
- Claude Code OAuth: explicit env/file handoff or `~/.claude/.credentials.json`
- Option C: Edit `config.yaml` directly (Not recommended for production)
```yaml
models:
- name: gpt-4
api_key: your-actual-api-key-here # Replace placeholder
```
### Running the Application
#### Option 1: Docker (Recommended)
**Development** (hot-reload, source mounts):
```bash
make docker-init # Pull sandbox image (only once or when image updates)
make docker-start # Start services (auto-detects sandbox mode from config.yaml)
```
`make docker-start` starts `provisioner` only when `config.yaml` uses provisioner mode (`sandbox.use: deerflow.community.aio_sandbox:AioSandboxProvider` with `provisioner_url`).
Backend processes automatically pick up `config.yaml` changes on the next config access, so model metadata updates do not require a manual restart during development.
**Production** (builds images locally, mounts runtime config and data):
```bash
make up # Build images and start all production services
make down # Stop and remove containers
```
> [!NOTE]
> The LangGraph agent server currently runs via `langgraph dev` (the open-source CLI server).
Access: http://localhost:2026
See [CONTRIBUTING.md](CONTRIBUTING.md) for detailed Docker development guide.
#### Option 2: Local Development
If you prefer running services locally:
Prerequisite: complete the "Configuration" steps above first (`make config` and model API keys). `make dev` requires a valid configuration file (defaults to `config.yaml` in the project root; can be overridden via `DEER_FLOW_CONFIG_PATH`).
1. **Check prerequisites**:
```bash
make check # Verifies Node.js 22+, pnpm, uv, nginx
```
2. **Install dependencies**:
```bash
make install # Install backend + frontend dependencies
```
3. **(Optional) Pre-pull sandbox image**:
```bash
# Recommended if using Docker/Container-based sandbox
make setup-sandbox
```
4. **Start services**:
```bash
make dev
```
5. **Access**: http://localhost:2026
### Advanced
#### Sandbox Mode
DeerFlow supports multiple sandbox execution modes:
- **Local Execution** (runs sandbox code directly on the host machine)
- **Docker Execution** (runs sandbox code in isolated Docker containers)
- **Docker Execution with Kubernetes** (runs sandbox code in Kubernetes pods via provisioner service)
For Docker development, service startup follows `config.yaml` sandbox mode. In Local/Docker modes, `provisioner` is not started.
See the [Sandbox Configuration Guide](backend/docs/CONFIGURATION.md#sandbox) to configure your preferred mode.
#### MCP Server
DeerFlow supports configurable MCP servers and skills to extend its capabilities.
For HTTP/SSE MCP servers, OAuth token flows are supported (`client_credentials`, `refresh_token`).
See the [MCP Server Guide](backend/docs/MCP_SERVER.md) for detailed instructions.
#### IM Channels
DeerFlow supports receiving tasks from messaging apps. Channels auto-start when configured — no public IP required for any of them.
| Channel | Transport | Difficulty |
|---------|-----------|------------|
| Telegram | Bot API (long-polling) | Easy |
| Slack | Socket Mode | Moderate |
| Feishu / Lark | WebSocket | Moderate |
**Configuration in `config.yaml`:**
```yaml
channels:
# LangGraph Server URL (default: http://localhost:2024)
langgraph_url: http://localhost:2024
# Gateway API URL (default: http://localhost:8001)
gateway_url: http://localhost:8001
# Optional: global session defaults for all mobile channels
session:
assistant_id: lead_agent
config:
recursion_limit: 100
context:
thinking_enabled: true
is_plan_mode: false
subagent_enabled: false
feishu:
enabled: true
app_id: $FEISHU_APP_ID
app_secret: $FEISHU_APP_SECRET
slack:
enabled: true
bot_token: $SLACK_BOT_TOKEN # xoxb-...
app_token: $SLACK_APP_TOKEN # xapp-... (Socket Mode)
allowed_users: [] # empty = allow all
telegram:
enabled: true
bot_token: $TELEGRAM_BOT_TOKEN
allowed_users: [] # empty = allow all
# Optional: per-channel / per-user session settings
session:
assistant_id: mobile_agent
context:
thinking_enabled: false
users:
"123456789":
assistant_id: vip_agent
config:
recursion_limit: 150
context:
thinking_enabled: true
subagent_enabled: true
```
Set the corresponding API keys in your `.env` file:
```bash
# Telegram
TELEGRAM_BOT_TOKEN=123456789:ABCdefGHIjklMNOpqrSTUvwxYZ
# Slack
SLACK_BOT_TOKEN=xoxb-...
SLACK_APP_TOKEN=xapp-...
# Feishu / Lark
FEISHU_APP_ID=cli_xxxx
FEISHU_APP_SECRET=your_app_secret
```
**Telegram Setup**
1. Chat with [@BotFather](https://t.me/BotFather), send `/newbot`, and copy the HTTP API token.
2. Set `TELEGRAM_BOT_TOKEN` in `.env` and enable the channel in `config.yaml`.
**Slack Setup**
1. Create a Slack App at [api.slack.com/apps](https://api.slack.com/apps) → Create New App → From scratch.
2. Under **OAuth & Permissions**, add Bot Token Scopes: `app_mentions:read`, `chat:write`, `im:history`, `im:read`, `im:write`, `files:write`.
3. Enable **Socket Mode** → generate an App-Level Token (`xapp-…`) with `connections:write` scope.
4. Under **Event Subscriptions**, subscribe to bot events: `app_mention`, `message.im`.
5. Set `SLACK_BOT_TOKEN` and `SLACK_APP_TOKEN` in `.env` and enable the channel in `config.yaml`.
**Feishu / Lark Setup**
1. Create an app on [Feishu Open Platform](https://open.feishu.cn/) → enable **Bot** capability.
2. Add permissions: `im:message`, `im:message.p2p_msg:readonly`, `im:resource`.
3. Under **Events**, subscribe to `im.message.receive_v1` and select **Long Connection** mode.
4. Copy the App ID and App Secret. Set `FEISHU_APP_ID` and `FEISHU_APP_SECRET` in `.env` and enable the channel in `config.yaml`.
**Commands**
Once a channel is connected, you can interact with DeerFlow directly from the chat:
| Command | Description |
|---------|-------------|
| `/new` | Start a new conversation |
| `/status` | Show current thread info |
| `/models` | List available models |
| `/memory` | View memory |
| `/help` | Show help |
> Messages without a command prefix are treated as regular chat — DeerFlow creates a thread and responds conversationally.
## From Deep Research to Super Agent Harness
DeerFlow started as a Deep Research framework — and the community ran with it. Since launch, developers have pushed it far beyond research: building data pipelines, generating slide decks, spinning up dashboards, automating content workflows. Things we never anticipated.
That told us something important: DeerFlow wasn't just a research tool. It was a **harness** — a runtime that gives agents the infrastructure to actually get work done.
So we rebuilt it from scratch.
DeerFlow 2.0 is no longer a framework you wire together. It's a super agent harness — batteries included, fully extensible. Built on LangGraph and LangChain, it ships with everything an agent needs out of the box: a filesystem, memory, skills, sandboxed execution, and the ability to plan and spawn sub-agents for complex, multi-step tasks.
Use it as-is. Or tear it apart and make it yours.
## Core Features
### Skills & Tools
Skills are what make DeerFlow do *almost anything*.
A standard Agent Skill is a structured capability module — a Markdown file that defines a workflow, best practices, and references to supporting resources. DeerFlow ships with built-in skills for research, report generation, slide creation, web pages, image and video generation, and more. But the real power is extensibility: add your own skills, replace the built-in ones, or combine them into compound workflows.
Skills are loaded progressively — only when the task needs them, not all at once. This keeps the context window lean and makes DeerFlow work well even with token-sensitive models.
When you install `.skill` archives through the Gateway, DeerFlow accepts standard optional frontmatter metadata such as `version`, `author`, and `compatibility` instead of rejecting otherwise valid external skills.
Tools follow the same philosophy. DeerFlow comes with a core toolset — web search, web fetch, file operations, bash execution — and supports custom tools via MCP servers and Python functions. Swap anything. Add anything.
Gateway-generated follow-up suggestions now normalize both plain-string model output and block/list-style rich content before parsing the JSON array response, so provider-specific content wrappers do not silently drop suggestions.
```
# Paths inside the sandbox container
/mnt/skills/public
├── research/SKILL.md
├── report-generation/SKILL.md
├── slide-creation/SKILL.md
├── web-page/SKILL.md
└── image-generation/SKILL.md
/mnt/skills/custom
└── your-custom-skill/SKILL.md ← yours
```
#### Claude Code Integration
The `claude-to-deerflow` skill lets you interact with a running DeerFlow instance directly from [Claude Code](https://docs.anthropic.com/en/docs/claude-code). Send research tasks, check status, manage threads — all without leaving the terminal.
**Install the skill**:
```bash
npx skills add https://github.com/bytedance/deer-flow --skill claude-to-deerflow
```
Then make sure DeerFlow is running (default at `http://localhost:2026`) and use the `/claude-to-deerflow` command in Claude Code.
**What you can do**:
- Send messages to DeerFlow and get streaming responses
- Choose execution modes: flash (fast), standard, pro (planning), ultra (sub-agents)
- Check DeerFlow health, list models/skills/agents
- Manage threads and conversation history
- Upload files for analysis
**Environment variables** (optional, for custom endpoints):
```bash
DEERFLOW_URL=http://localhost:2026 # Unified proxy base URL
DEERFLOW_GATEWAY_URL=http://localhost:2026 # Gateway API
DEERFLOW_LANGGRAPH_URL=http://localhost:2026/api/langgraph # LangGraph API
```
See [`skills/public/claude-to-deerflow/SKILL.md`](skills/public/claude-to-deerflow/SKILL.md) for the full API reference.
### Sub-Agents
Complex tasks rarely fit in a single pass. DeerFlow decomposes them.
The lead agent can spawn sub-agents on the fly — each with its own scoped context, tools, and termination conditions. Sub-agents run in parallel when possible, report back structured results, and the lead agent synthesizes everything into a coherent output.
This is how DeerFlow handles tasks that take minutes to hours: a research task might fan out into a dozen sub-agents, each exploring a different angle, then converge into a single report — or a website — or a slide deck with generated visuals. One harness, many hands.
### Sandbox & File System
DeerFlow doesn't just *talk* about doing things. It has its own computer.
Each task runs inside an isolated Docker container with a full filesystem — skills, workspace, uploads, outputs. The agent reads, writes, and edits files. It executes bash commands and codes. It views images. All sandboxed, all auditable, zero contamination between sessions.
This is the difference between a chatbot with tool access and an agent with an actual execution environment.
```
# Paths inside the sandbox container
/mnt/user-data/
├── uploads/ ← your files
├── workspace/ ← agents' working directory
└── outputs/ ← final deliverables
```
### Context Engineering
**Isolated Sub-Agent Context**: Each sub-agent runs in its own isolated context. This means that the sub-agent will not be able to see the context of the main agent or other sub-agents. This is important to ensure that the sub-agent is able to focus on the task at hand and not be distracted by the context of the main agent or other sub-agents.
**Summarization**: Within a session, DeerFlow manages context aggressively — summarizing completed sub-tasks, offloading intermediate results to the filesystem, compressing what's no longer immediately relevant. This lets it stay sharp across long, multi-step tasks without blowing the context window.
### Long-Term Memory
Most agents forget everything the moment a conversation ends. DeerFlow remembers.
Across sessions, DeerFlow builds a persistent memory of your profile, preferences, and accumulated knowledge. The more you use it, the better it knows you — your writing style, your technical stack, your recurring workflows. Memory is stored locally and stays under your control.
Memory updates now skip duplicate fact entries at apply time, so repeated preferences and context do not accumulate endlessly across sessions.
## Recommended Models
DeerFlow is model-agnostic — it works with any LLM that implements the OpenAI-compatible API. That said, it performs best with models that support:
- **Long context windows** (100k+ tokens) for deep research and multi-step tasks
- **Reasoning capabilities** for adaptive planning and complex decomposition
- **Multimodal inputs** for image understanding and video comprehension
- **Strong tool-use** for reliable function calling and structured outputs
## Embedded Python Client
DeerFlow can be used as an embedded Python library without running the full HTTP services. The `DeerFlowClient` provides direct in-process access to all agent and Gateway capabilities, returning the same response schemas as the HTTP Gateway API:
```python
from deerflow.client import DeerFlowClient
client = DeerFlowClient()
# Chat
response = client.chat("Analyze this paper for me", thread_id="my-thread")
# Streaming (LangGraph SSE protocol: values, messages-tuple, end)
for event in client.stream("hello"):
if event.type == "messages-tuple" and event.data.get("type") == "ai":
print(event.data["content"])
# Configuration & management — returns Gateway-aligned dicts
models = client.list_models() # {"models": [...]}
skills = client.list_skills() # {"skills": [...]}
client.update_skill("web-search", enabled=True)
client.upload_files("thread-1", ["./report.pdf"]) # {"success": True, "files": [...]}
```
All dict-returning methods are validated against Gateway Pydantic response models in CI (`TestGatewayConformance`), ensuring the embedded client stays in sync with the HTTP API schemas. See `backend/packages/harness/deerflow/client.py` for full API documentation.
## Documentation
- [Contributing Guide](CONTRIBUTING.md) - Development environment setup and workflow
- [Configuration Guide](backend/docs/CONFIGURATION.md) - Setup and configuration instructions
- [Architecture Overview](backend/CLAUDE.md) - Technical architecture details
- [Backend Architecture](backend/README.md) - Backend architecture and API reference
## Contributing
We welcome contributions! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for development setup, workflow, and guidelines.
Regression coverage includes Docker sandbox mode detection and provisioner kubeconfig-path handling tests in `backend/tests/`.
## License
This project is open source and available under the [MIT License](./LICENSE).
## Acknowledgments
DeerFlow is built upon the incredible work of the open-source community. We are deeply grateful to all the projects and contributors whose efforts have made DeerFlow possible. Truly, we stand on the shoulders of giants.
We would like to extend our sincere appreciation to the following projects for their invaluable contributions:
- **[LangChain](https://github.com/langchain-ai/langchain)**: Their exceptional framework powers our LLM interactions and chains, enabling seamless integration and functionality.
- **[LangGraph](https://github.com/langchain-ai/langgraph)**: Their innovative approach to multi-agent orchestration has been instrumental in enabling DeerFlow's sophisticated workflows.
These projects exemplify the transformative power of open-source collaboration, and we are proud to build upon their foundations.
### Key Contributors
A heartfelt thank you goes out to the core authors of `DeerFlow`, whose vision, passion, and dedication have brought this project to life:
- **[Daniel Walnut](https://github.com/hetaoBackend/)**
- **[Henry Li](https://github.com/magiccube/)**
Your unwavering commitment and expertise have been the driving force behind DeerFlow's success. We are honored to have you at the helm of this journey.
## Star History
[](https://star-history.com/#bytedance/deer-flow&Date)
================================================
FILE: README_ja.md
================================================
# 🦌 DeerFlow - 2.0
[English](./README.md) | [中文](./README_zh.md) | 日本語
[](./backend/pyproject.toml)
[](./Makefile)
[](./LICENSE)
<a href="https://trendshift.io/repositories/14699" target="_blank"><img src="https://trendshift.io/api/badge/repositories/14699" alt="bytedance%2Fdeer-flow | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
> 2026年2月28日、バージョン2のリリースに伴い、DeerFlowはGitHub Trendingで🏆 第1位を獲得しました。素晴らしいコミュニティの皆さん、ありがとうございます!💪🔥
DeerFlow(**D**eep **E**xploration and **E**fficient **R**esearch **Flow**)は、**サブエージェント**、**メモリ**、**サンドボックス**を統合し、**拡張可能なスキル**によってあらゆるタスクを実行できるオープンソースの**スーパーエージェントハーネス**です。
https://github.com/user-attachments/assets/a8bcadc4-e040-4cf2-8fda-dd768b999c18
> [!NOTE]
> **DeerFlow 2.0はゼロからの完全な書き直しです。** v1とコードを共有していません。オリジナルのDeep Researchフレームワークをお探しの場合は、[`1.x`ブランチ](https://github.com/bytedance/deer-flow/tree/main-1.x)で引き続きメンテナンスされています。現在の開発は2.0に移行しています。
## 公式ウェブサイト
[<img width="2880" height="1600" alt="image" src="https://github.com/user-attachments/assets/a598c49f-3b2f-41ea-a052-05e21349188a" />](https://deerflow.tech)
**実際のデモ**は[**公式ウェブサイト**](https://deerflow.tech)でご覧いただけます。
## ByteDance Volcengine のコーディングプラン
<img width="4808" height="2400" alt="英文方舟" src="https://github.com/user-attachments/assets/2ecc7b9d-50be-4185-b1f7-5542d222fb2d" />
- DeerFlowの実行には、Doubao-Seed-2.0-Code、DeepSeek v3.2、Kimi 2.5の使用を強く推奨します
- [詳細はこちら](https://www.byteplus.com/en/activity/codingplan?utm_campaign=deer_flow&utm_content=deer_flow&utm_medium=devrel&utm_source=OWO&utm_term=deer_flow)
- [中国大陸の開発者はこちらをクリック](https://www.volcengine.com/activity/codingplan?utm_campaign=deer_flow&utm_content=deer_flow&utm_medium=devrel&utm_source=OWO&utm_term=deer_flow)
## InfoQuest
DeerFlowは、BytePlusが独自に開発したインテリジェント検索・クローリングツールセット「[InfoQuest(無料オンライン体験対応)](https://docs.byteplus.com/en/docs/InfoQuest/What_is_Info_Quest)」を新たに統合しました。
<a href="https://docs.byteplus.com/en/docs/InfoQuest/What_is_Info_Quest" target="_blank">
<img
src="https://sf16-sg.tiktokcdn.com/obj/eden-sg/hubseh7bsbps/20251208-160108.png" alt="InfoQuest_banner"
/>
</a>
---
## 目次
- [🦌 DeerFlow - 2.0](#-deerflow---20)
- [公式ウェブサイト](#公式ウェブサイト)
- [InfoQuest](#infoquest)
- [目次](#目次)
- [クイックスタート](#クイックスタート)
- [設定](#設定)
- [アプリケーションの実行](#アプリケーションの実行)
- [オプション1: Docker(推奨)](#オプション1-docker推奨)
- [オプション2: ローカル開発](#オプション2-ローカル開発)
- [詳細設定](#詳細設定)
- [サンドボックスモード](#サンドボックスモード)
- [MCPサーバー](#mcpサーバー)
- [IMチャネル](#imチャネル)
- [Deep Researchからスーパーエージェントハーネスへ](#deep-researchからスーパーエージェントハーネスへ)
- [コア機能](#コア機能)
- [スキルとツール](#スキルとツール)
- [Claude Code連携](#claude-code連携)
- [サブエージェント](#サブエージェント)
- [サンドボックスとファイルシステム](#サンドボックスとファイルシステム)
- [コンテキストエンジニアリング](#コンテキストエンジニアリング)
- [長期メモリ](#長期メモリ)
- [推奨モデル](#推奨モデル)
- [組み込みPythonクライアント](#組み込みpythonクライアント)
- [ドキュメント](#ドキュメント)
- [コントリビュート](#コントリビュート)
- [ライセンス](#ライセンス)
- [謝辞](#謝辞)
- [主要コントリビューター](#主要コントリビューター)
- [Star History](#star-history)
## クイックスタート
### 設定
1. **DeerFlowリポジトリをクローン**
```bash
git clone https://github.com/bytedance/deer-flow.git
cd deer-flow
```
2. **ローカル設定ファイルの生成**
プロジェクトルートディレクトリ(`deer-flow/`)から以下を実行します:
```bash
make config
```
このコマンドは、提供されたテンプレートに基づいてローカル設定ファイルを作成します。
3. **使用するモデルの設定**
`config.yaml`を編集し、少なくとも1つのモデルを定義します:
```yaml
models:
- name: gpt-4 # 内部識別子
display_name: GPT-4 # 表示名
use: langchain_openai:ChatOpenAI # LangChainクラスパス
model: gpt-4 # API用モデル識別子
api_key: $OPENAI_API_KEY # APIキー(推奨:環境変数を使用)
max_tokens: 4096 # リクエストあたりの最大トークン数
temperature: 0.7 # サンプリング温度
- name: openrouter-gemini-2.5-flash
display_name: Gemini 2.5 Flash (OpenRouter)
use: langchain_openai:ChatOpenAI
model: google/gemini-2.5-flash-preview
api_key: $OPENAI_API_KEY # OpenRouterもここではOpenAI互換のフィールド名を使用
base_url: https://openrouter.ai/api/v1
```
OpenRouterやOpenAI互換のゲートウェイは、`langchain_openai:ChatOpenAI`と`base_url`で設定します。プロバイダー固有の環境変数名を使用したい場合は、`api_key`でその変数を明示的に指定してください(例:`api_key: $OPENROUTER_API_KEY`)。
4. **設定したモデルのAPIキーを設定**
以下のいずれかの方法を選択してください:
- オプションA:プロジェクトルートの`.env`ファイルを編集(推奨)
```bash
TAVILY_API_KEY=your-tavily-api-key
OPENAI_API_KEY=your-openai-api-key
# OpenRouterもlangchain_openai:ChatOpenAI + base_url使用時はOPENAI_API_KEYを使用します。
# 必要に応じて他のプロバイダーキーを追加
INFOQUEST_API_KEY=your-infoquest-api-key
```
- オプションB:シェルで環境変数をエクスポート
```bash
export OPENAI_API_KEY=your-openai-api-key
```
- オプションC:`config.yaml`を直接編集(本番環境には非推奨)
```yaml
models:
- name: gpt-4
api_key: your-actual-api-key-here # プレースホルダーを置換
```
### アプリケーションの実行
#### オプション1: Docker(推奨)
**開発環境**(ホットリロード、ソースマウント):
```bash
make docker-init # サンドボックスイメージをプル(初回またはイメージ更新時のみ)
make docker-start # サービスを開始(config.yamlからサンドボックスモードを自動検出)
```
`make docker-start`は、`config.yaml`がプロビジョナーモード(`sandbox.use: deerflow.community.aio_sandbox:AioSandboxProvider`と`provisioner_url`)を使用している場合にのみ`provisioner`を起動します。
**本番環境**(ローカルでイメージをビルドし、ランタイム設定とデータをマウント):
```bash
make up # イメージをビルドして全本番サービスを開始
make down # コンテナを停止して削除
```
> [!NOTE]
> LangGraphエージェントサーバーは現在`langgraph dev`(オープンソースCLIサーバー)経由で実行されます。
アクセス: http://localhost:2026
詳細なDocker開発ガイドは[CONTRIBUTING.md](CONTRIBUTING.md)をご覧ください。
#### オプション2: ローカル開発
サービスをローカルで実行する場合:
前提条件:上記の「設定」手順を先に完了してください(`make config`とモデルAPIキー)。`make dev`には有効な設定ファイルが必要です(デフォルトはプロジェクトルートの`config.yaml`。`DEER_FLOW_CONFIG_PATH`で上書き可能)。
1. **前提条件の確認**:
```bash
make check # Node.js 22+、pnpm、uv、nginxを検証
```
2. **依存関係のインストール**:
```bash
make install # バックエンド+フロントエンドの依存関係をインストール
```
3. **(オプション)サンドボックスイメージの事前プル**:
```bash
# Docker/コンテナベースのサンドボックス使用時に推奨
make setup-sandbox
```
4. **サービスの開始**:
```bash
make dev
```
5. **アクセス**: http://localhost:2026
### 詳細設定
#### サンドボックスモード
DeerFlowは複数のサンドボックス実行モードをサポートしています:
- **ローカル実行**(ホストマシン上で直接サンドボックスコードを実行)
- **Docker実行**(分離されたDockerコンテナ内でサンドボックスコードを実行)
- **KubernetesによるDocker実行**(プロビジョナーサービス経由でKubernetesポッドでサンドボックスコードを実行)
Docker開発では、サービスの起動は`config.yaml`のサンドボックスモードに従います。ローカル/Dockerモードでは`provisioner`は起動されません。
お好みのモードの設定については[サンドボックス設定ガイド](backend/docs/CONFIGURATION.md#sandbox)をご覧ください。
#### MCPサーバー
DeerFlowは、機能を拡張するための設定可能なMCPサーバーとスキルをサポートしています。
HTTP/SSE MCPサーバーでは、OAuthトークンフロー(`client_credentials`、`refresh_token`)がサポートされています。
詳細な手順は[MCPサーバーガイド](backend/docs/MCP_SERVER.md)をご覧ください。
#### IMチャネル
DeerFlowはメッセージングアプリからのタスク受信をサポートしています。チャネルは設定時に自動的に開始されます。いずれもパブリックIPは不要です。
| チャネル | トランスポート | 難易度 |
|---------|-----------|------------|
| Telegram | Bot API(ロングポーリング) | 簡単 |
| Slack | Socket Mode | 中程度 |
| Feishu / Lark | WebSocket | 中程度 |
**`config.yaml`での設定:**
```yaml
channels:
# LangGraphサーバーURL(デフォルト: http://localhost:2024)
langgraph_url: http://localhost:2024
# Gateway API URL(デフォルト: http://localhost:8001)
gateway_url: http://localhost:8001
# オプション: 全モバイルチャネルのグローバルセッションデフォルト
session:
assistant_id: lead_agent
config:
recursion_limit: 100
context:
thinking_enabled: true
is_plan_mode: false
subagent_enabled: false
feishu:
enabled: true
app_id: $FEISHU_APP_ID
app_secret: $FEISHU_APP_SECRET
slack:
enabled: true
bot_token: $SLACK_BOT_TOKEN # xoxb-...
app_token: $SLACK_APP_TOKEN # xapp-...(Socket Mode)
allowed_users: [] # 空 = 全員許可
telegram:
enabled: true
bot_token: $TELEGRAM_BOT_TOKEN
allowed_users: [] # 空 = 全員許可
# オプション: チャネル/ユーザーごとのセッション設定
session:
assistant_id: mobile_agent
context:
thinking_enabled: false
users:
"123456789":
assistant_id: vip_agent
config:
recursion_limit: 150
context:
thinking_enabled: true
subagent_enabled: true
```
対応するAPIキーを`.env`ファイルに設定します:
```bash
# Telegram
TELEGRAM_BOT_TOKEN=123456789:ABCdefGHIjklMNOpqrSTUvwxYZ
# Slack
SLACK_BOT_TOKEN=xoxb-...
SLACK_APP_TOKEN=xapp-...
# Feishu / Lark
FEISHU_APP_ID=cli_xxxx
FEISHU_APP_SECRET=your_app_secret
```
**Telegramのセットアップ**
1. [@BotFather](https://t.me/BotFather)とチャットし、`/newbot`を送信してHTTP APIトークンをコピーします。
2. `.env`に`TELEGRAM_BOT_TOKEN`を設定し、`config.yaml`でチャネルを有効にします。
**Slackのセットアップ**
1. [api.slack.com/apps](https://api.slack.com/apps)でSlackアプリを作成 → 新規アプリ作成 → 最初から作成。
2. **OAuth & Permissions**で、Botトークンスコープを追加:`app_mentions:read`、`chat:write`、`im:history`、`im:read`、`im:write`、`files:write`。
3. **Socket Mode**を有効化 → `connections:write`スコープのApp-Levelトークン(`xapp-…`)を生成。
4. **Event Subscriptions**で、ボットイベントを購読:`app_mention`、`message.im`。
5. `.env`に`SLACK_BOT_TOKEN`と`SLACK_APP_TOKEN`を設定し、`config.yaml`でチャネルを有効にします。
**Feishu / Larkのセットアップ**
1. [Feishu Open Platform](https://open.feishu.cn/)でアプリを作成 → **ボット**機能を有効化。
2. 権限を追加:`im:message`、`im:message.p2p_msg:readonly`、`im:resource`。
3. **イベント**で`im.message.receive_v1`を購読し、**ロングコネクション**モードを選択。
4. App IDとApp Secretをコピー。`.env`に`FEISHU_APP_ID`と`FEISHU_APP_SECRET`を設定し、`config.yaml`でチャネルを有効にします。
**コマンド**
チャネル接続後、チャットから直接DeerFlowと対話できます:
| コマンド | 説明 |
|---------|-------------|
| `/new` | 新しい会話を開始 |
| `/status` | 現在のスレッド情報を表示 |
| `/models` | 利用可能なモデルを一覧表示 |
| `/memory` | メモリを表示 |
| `/help` | ヘルプを表示 |
> コマンドプレフィックスのないメッセージは通常のチャットとして扱われ、DeerFlowがスレッドを作成して会話形式で応答します。
## Deep Researchからスーパーエージェントハーネスへ
DeerFlowはDeep Researchフレームワークとして始まり、コミュニティがそれを大きく発展させました。リリース以来、開発者たちはリサーチを超えて活用してきました:データパイプラインの構築、スライドデッキの生成、ダッシュボードの立ち上げ、コンテンツワークフローの自動化。私たちが予想もしなかったことです。
これは重要なことを示していました:DeerFlowは単なるリサーチツールではなかったのです。それは**ハーネス**——エージェントが実際に仕事をこなすためのインフラを提供するランタイムでした。
そこで、ゼロから再構築しました。
DeerFlow 2.0は、もはやつなぎ合わせるフレームワークではありません。バッテリー同梱、完全に拡張可能なスーパーエージェントハーネスです。LangGraphとLangChainの上に構築され、エージェントが必要とするすべてを標準搭載しています:ファイルシステム、メモリ、スキル、サンドボックス実行、そして複雑なマルチステップタスクのためのプランニングとサブエージェントの生成機能。
そのまま使うもよし。分解して自分のものにするもよし。
## コア機能
### スキルとツール
スキルこそが、DeerFlowを*ほぼ何でもできる*ものにしています。
標準的なエージェントスキルは構造化された機能モジュールです——ワークフロー、ベストプラクティス、サポートリソースへの参照を定義するMarkdownファイルです。DeerFlowにはリサーチ、レポート生成、スライド作成、Webページ、画像・動画生成などの組み込みスキルが付属しています。しかし、真の力は拡張性にあります:独自のスキルを追加し、組み込みスキルを置き換え、複合ワークフローに組み合わせることができます。
スキルはプログレッシブに読み込まれます——タスクが必要とする時にのみ、一度にすべてではありません。これによりコンテキストウィンドウを軽量に保ち、トークンに敏感なモデルでもDeerFlowがうまく動作します。
Gateway経由で`.skill`アーカイブをインストールする際、DeerFlowは`version`、`author`、`compatibility`などの標準的なオプショナルフロントマターメタデータを受け入れ、有効な外部スキルを拒否しません。
ツールも同じ哲学に従います。DeerFlowにはコアツールセット——Web検索、Webフェッチ、ファイル操作、bash実行——が付属し、MCPサーバーやPython関数によるカスタムツールをサポートしています。何でも入れ替え可能、何でも追加可能です。
Gatewayが生成するフォローアップ提案は、プレーン文字列のモデル出力とブロック/リスト形式のリッチコンテンツの両方をJSON配列レスポンスの解析前に正規化するため、プロバイダー固有のコンテンツラッパーが提案をサイレントにドロップすることはありません。
```
# サンドボックスコンテナ内のパス
/mnt/skills/public
├── research/SKILL.md
├── report-generation/SKILL.md
├── slide-creation/SKILL.md
├── web-page/SKILL.md
└── image-generation/SKILL.md
/mnt/skills/custom
└── your-custom-skill/SKILL.md ← あなたのカスタムスキル
```
#### Claude Code連携
`claude-to-deerflow`スキルを使えば、[Claude Code](https://docs.anthropic.com/en/docs/claude-code)から直接、実行中のDeerFlowインスタンスと対話できます。リサーチタスクの送信、ステータスの確認、スレッドの管理——すべてターミナルから離れずに実行できます。
**スキルのインストール**:
```bash
npx skills add https://github.com/bytedance/deer-flow --skill claude-to-deerflow
```
DeerFlowが実行中であることを確認し(デフォルトは`http://localhost:2026`)、Claude Codeで`/claude-to-deerflow`コマンドを使用します。
**できること**:
- DeerFlowにメッセージを送信してストリーミングレスポンスを取得
- 実行モードの選択:flash(高速)、standard、pro(プランニング)、ultra(サブエージェント)
- DeerFlowのヘルスチェック、モデル/スキル/エージェントの一覧表示
- スレッドと会話履歴の管理
- 分析用ファイルのアップロード
**環境変数**(オプション、カスタムエンドポイント用):
```bash
DEERFLOW_URL=http://localhost:2026 # 統合プロキシベースURL
DEERFLOW_GATEWAY_URL=http://localhost:2026 # Gateway API
DEERFLOW_LANGGRAPH_URL=http://localhost:2026/api/langgraph # LangGraph API
```
完全なAPIリファレンスは[`skills/public/claude-to-deerflow/SKILL.md`](skills/public/claude-to-deerflow/SKILL.md)をご覧ください。
### サブエージェント
複雑なタスクは単一のパスに収まりません。DeerFlowはそれを分解します。
リードエージェントはオンザフライでサブエージェントを生成できます——それぞれ独自のスコープ付きコンテキスト、ツール、終了条件を持ちます。サブエージェントは可能な限り並列で実行され、構造化された結果を報告し、リードエージェントがすべてを一貫した出力に統合します。
これがDeerFlowが数分から数時間かかるタスクを処理する方法です:リサーチタスクが十数のサブエージェントに展開され、それぞれが異なる角度を探索し、1つのレポート——またはWebサイト——または生成されたビジュアル付きのスライドデッキに収束します。1つのハーネス、多くの手。
### サンドボックスとファイルシステム
DeerFlowは物事を*語る*だけではありません。自分のコンピューターを持っています。
各タスクは、完全なファイルシステムを持つ分離されたDockerコンテナ内で実行されます——スキル、ワークスペース、アップロード、出力。エージェントはファイルの読み書き・編集を行います。bashコマンドを実行し、コーディングを行います。画像を表示します。すべてサンドボックス化され、すべて監査可能で、セッション間の汚染はゼロです。
これが、ツールアクセスのあるチャットボットと、実際の実行環境を持つエージェントの違いです。
```
# サンドボックスコンテナ内のパス
/mnt/user-data/
├── uploads/ ← あなたのファイル
├── workspace/ ← エージェントの作業ディレクトリ
└── outputs/ ← 最終成果物
```
### コンテキストエンジニアリング
**分離されたサブエージェントコンテキスト**:各サブエージェントは独自の分離されたコンテキストで実行されます。これにより、サブエージェントはメインエージェントや他のサブエージェントのコンテキストを見ることができません。これは、サブエージェントが目の前のタスクに集中し、メインエージェントや他のサブエージェントのコンテキストに気を取られないようにするために重要です。
**要約化**:セッション内で、DeerFlowはコンテキストを積極的に管理します——完了したサブタスクの要約、中間結果のファイルシステムへのオフロード、もはや直接関係のないものの圧縮。これにより、コンテキストウィンドウを超えることなく、長いマルチステップタスク全体を通じてシャープさを維持します。
### 長期メモリ
ほとんどのエージェントは、会話が終わるとすべてを忘れます。DeerFlowは記憶します。
セッションをまたいで、DeerFlowはあなたのプロフィール、好み、蓄積された知識の永続的なメモリを構築します。使えば使うほど、あなたのことをよく知るようになります——あなたの文体、技術スタック、繰り返されるワークフロー。メモリはローカルに保存され、あなたの管理下にあります。
メモリ更新は適用時に重複するファクトエントリをスキップするようになり、繰り返される好みやコンテキストがセッションをまたいで際限なく蓄積されることはありません。
## 推奨モデル
DeerFlowはモデルに依存しません——OpenAI互換APIを実装する任意のLLMで動作します。とはいえ、以下をサポートするモデルで最高のパフォーマンスを発揮します:
- **長いコンテキストウィンドウ**(10万トークン以上):深いリサーチとマルチステップタスク向け
- **推論能力**:適応的なプランニングと複雑な分解向け
- **マルチモーダル入力**:画像理解と動画理解向け
- **強力なツール使用**:信頼性の高いファンクションコーリングと構造化された出力向け
## 組み込みPythonクライアント
DeerFlowは、完全なHTTPサービスを実行せずに組み込みPythonライブラリとして使用できます。`DeerFlowClient`は、すべてのエージェントとGateway機能へのプロセス内直接アクセスを提供し、HTTP Gateway APIと同じレスポンススキーマを返します:
```python
from deerflow.client import DeerFlowClient
client = DeerFlowClient()
# チャット
response = client.chat("Analyze this paper for me", thread_id="my-thread")
# ストリーミング(LangGraph SSEプロトコル:values、messages-tuple、end)
for event in client.stream("hello"):
if event.type == "messages-tuple" and event.data.get("type") == "ai":
print(event.data["content"])
# 設定&管理 — Gateway準拠のdictを返す
models = client.list_models() # {"models": [...]}
skills = client.list_skills() # {"skills": [...]}
client.update_skill("web-search", enabled=True)
client.upload_files("thread-1", ["./report.pdf"]) # {"success": True, "files": [...]}
```
すべてのdict返却メソッドはCIでGateway Pydanticレスポンスモデルに対して検証されており(`TestGatewayConformance`)、組み込みクライアントがHTTP APIスキーマと同期していることを保証します。完全なAPIドキュメントは`backend/packages/harness/deerflow/client.py`をご覧ください。
## ドキュメント
- [コントリビュートガイド](CONTRIBUTING.md) - 開発環境のセットアップとワークフロー
- [設定ガイド](backend/docs/CONFIGURATION.md) - セットアップと設定の手順
- [アーキテクチャ概要](backend/CLAUDE.md) - 技術的なアーキテクチャの詳細
- [バックエンドアーキテクチャ](backend/README.md) - バックエンドアーキテクチャとAPIリファレンス
## コントリビュート
コントリビューションを歓迎します!開発環境のセットアップ、ワークフロー、ガイドラインについては[CONTRIBUTING.md](CONTRIBUTING.md)をご覧ください。
回帰テストのカバレッジには、`backend/tests/`でのDockerサンドボックスモード検出とプロビジョナーkubeconfig-pathハンドリングテストが含まれます。
## ライセンス
このプロジェクトはオープンソースであり、[MITライセンス](./LICENSE)の下で提供されています。
## 謝辞
DeerFlowはオープンソースコミュニティの素晴らしい成果の上に構築されています。DeerFlowを可能にしてくれたすべてのプロジェクトとコントリビューターに深く感謝いたします。まさに、巨人の肩の上に立っています。
以下のプロジェクトの貴重な貢献に心からの感謝を申し上げます:
- **[LangChain](https://github.com/langchain-ai/langchain)**:その優れたフレームワークがLLMのインタラクションとチェーンを支え、シームレスな統合と機能を実現しています。
- **[LangGraph](https://github.com/langchain-ai/langgraph)**:マルチエージェントオーケストレーションへの革新的なアプローチが、DeerFlowの洗練されたワークフローの実現に大きく貢献しています。
これらのプロジェクトはオープンソースコラボレーションの変革的な力を体現しており、その基盤の上に構築できることを誇りに思います。
### 主要コントリビューター
`DeerFlow`のコア著者に心からの感謝を捧げます。そのビジョン、情熱、献身がこのプロジェクトに命を吹き込みました:
- **[Daniel Walnut](https://github.com/hetaoBackend/)**
- **[Henry Li](https://github.com/magiccube/)**
揺るぎないコミットメントと専門知識が、DeerFlowの成功の原動力です。この旅の先頭に立ってくださっていることを光栄に思います。
## Star History
[](https://star-history.com/#bytedance/deer-flow&Date)
================================================
FILE: README_zh.md
================================================
# 🦌 DeerFlow - 2.0
[English](./README.md) | 中文 | [日本語](./README_ja.md)
[](./backend/pyproject.toml)
[](./Makefile)
[](./LICENSE)
<a href="https://trendshift.io/repositories/14699" target="_blank"><img src="https://trendshift.io/api/badge/repositories/14699" alt="bytedance%2Fdeer-flow | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
> 2026 年 2 月 28 日,DeerFlow 2 发布后登上 GitHub Trending 第 1 名。非常感谢社区的支持,这是大家一起做到的。
DeerFlow(**D**eep **E**xploration and **E**fficient **R**esearch **Flow**)是一个开源的 **super agent harness**。它把 **sub-agents**、**memory** 和 **sandbox** 组织在一起,再配合可扩展的 **skills**,让 agent 可以完成几乎任何事情。
https://github.com/user-attachments/assets/a8bcadc4-e040-4cf2-8fda-dd768b999c18
> [!NOTE]
> **DeerFlow 2.0 是一次彻底重写。** 它和 v1 没有共用代码。如果你要找的是最初的 Deep Research 框架,可以前往 [`1.x` 分支](https://github.com/bytedance/deer-flow/tree/main-1.x)。那里仍然欢迎贡献;当前的主要开发已经转向 2.0。
## 官网
[<img width="2880" height="1600" alt="image" src="https://github.com/user-attachments/assets/a598c49f-3b2f-41ea-a052-05e21349188a" />](https://deerflow.tech)
想了解更多,或者直接看**真实演示**,可以访问[**官网**](https://deerflow.tech)。
## 字节跳动火山引擎方舟 Coding Plan
<img width="4808" height="2400" alt="codingplan -banner 素材" src="https://github.com/user-attachments/assets/d30dae52-84f2-4021-b32f-6d281252b9ea" />
- 我们推荐使用 Doubao-Seed-2.0-Code、DeepSeek v3.2 和 Kimi 2.5 运行 DeerFlow
- [现在就加入 Coding Plan](https://www.volcengine.com/activity/codingplan?utm_campaign=deer_flow&utm_content=deer_flow&utm_medium=devrel&utm_source=OWO&utm_term=deer_flow)
- [海外地区的开发者请点击这里](https://www.byteplus.com/en/activity/codingplan?utm_campaign=deer_flow&utm_content=deer_flow&utm_medium=devrel&utm_source=OWO&utm_term=deer_flow)
## 目录
- [🦌 DeerFlow - 2.0](#-deerflow---20)
- [官网](#官网)
- [InfoQuest](#infoquest)
- [目录](#目录)
- [快速开始](#快速开始)
- [配置](#配置)
- [运行应用](#运行应用)
- [方式一:Docker(推荐)](#方式一docker推荐)
- [方式二:本地开发](#方式二本地开发)
- [进阶配置](#进阶配置)
- [Sandbox 模式](#sandbox-模式)
- [MCP Server](#mcp-server)
- [IM 渠道](#im-渠道)
- [从 Deep Research 到 Super Agent Harness](#从-deep-research-到-super-agent-harness)
- [核心特性](#核心特性)
- [Skills 与 Tools](#skills-与-tools)
- [Claude Code 集成](#claude-code-集成)
- [Sub-Agents](#sub-agents)
- [Sandbox 与文件系统](#sandbox-与文件系统)
- [Context Engineering](#context-engineering)
- [长期记忆](#长期记忆)
- [推荐模型](#推荐模型)
- [内嵌 Python Client](#内嵌-python-client)
- [文档](#文档)
- [参与贡献](#参与贡献)
- [许可证](#许可证)
- [致谢](#致谢)
- [核心贡献者](#核心贡献者)
- [Star History](#star-history)
## 快速开始
### 配置
1. **克隆 DeerFlow 仓库**
```bash
git clone https://github.com/bytedance/deer-flow.git
cd deer-flow
```
2. **生成本地配置文件**
在项目根目录(`deer-flow/`)执行:
```bash
make config
```
这个命令会基于示例模板生成本地配置文件。
3. **配置你要使用的模型**
编辑 `config.yaml`,至少定义一个模型:
```yaml
models:
- name: gpt-4 # 内部标识
display_name: GPT-4 # 展示名称
use: langchain_openai:ChatOpenAI # LangChain 类路径
model: gpt-4 # API 使用的模型标识
api_key: $OPENAI_API_KEY # API key(推荐使用环境变量)
max_tokens: 4096 # 单次请求最大 tokens
temperature: 0.7 # 采样温度
- name: openrouter-gemini-2.5-flash
display_name: Gemini 2.5 Flash (OpenRouter)
use: langchain_openai:ChatOpenAI
model: google/gemini-2.5-flash-preview
api_key: $OPENAI_API_KEY # 这里 OpenRouter 依然沿用 OpenAI 兼容字段名
base_url: https://openrouter.ai/api/v1
```
OpenRouter 以及类似的 OpenAI 兼容网关,建议通过 `langchain_openai:ChatOpenAI` 配合 `base_url` 来配置。如果你更想用 provider 自己的环境变量名,也可以直接把 `api_key` 指向对应变量,例如 `api_key: $OPENROUTER_API_KEY`。
4. **为已配置的模型设置 API key**
可任选以下一种方式:
- 方式 A:编辑项目根目录下的 `.env` 文件(推荐)
```bash
TAVILY_API_KEY=your-tavily-api-key
OPENAI_API_KEY=your-openai-api-key
# 如果配置使用的是 langchain_openai:ChatOpenAI + base_url,OpenRouter 也会读取 OPENAI_API_KEY
# 其他 provider 的 key 按需补充
INFOQUEST_API_KEY=your-infoquest-api-key
```
- 方式 B:在 shell 中导出环境变量
```bash
export OPENAI_API_KEY=your-openai-api-key
```
- 方式 C:直接编辑 `config.yaml`(不建议用于生产环境)
```yaml
models:
- name: gpt-4
api_key: your-actual-api-key-here # 替换为真实 key
```
### 运行应用
#### 方式一:Docker(推荐)
**开发模式**(支持热更新,挂载源码):
```bash
make docker-init # 拉取 sandbox 镜像(首次运行或镜像更新时执行)
make docker-start # 启动服务(会根据 config.yaml 自动判断 sandbox 模式)
```
如果 `config.yaml` 使用的是 provisioner 模式(`sandbox.use: deerflow.community.aio_sandbox:AioSandboxProvider` 且配置了 `provisioner_url`),`make docker-start` 才会启动 `provisioner`。
**生产模式**(本地构建镜像,并挂载运行期配置与数据):
```bash
make up # 构建镜像并启动全部生产服务
make down # 停止并移除容器
```
> [!NOTE]
> 当前 LangGraph agent server 通过开源 CLI 服务 `langgraph dev` 运行。
访问地址:http://localhost:2026
更完整的 Docker 开发说明见 [CONTRIBUTING.md](CONTRIBUTING.md)。
#### 方式二:本地开发
如果你更希望直接在本地启动各个服务:
前提:先完成上面的“配置”步骤(`make config` 和模型 API key 配置)。`make dev` 需要有效配置文件,默认读取项目根目录下的 `config.yaml`,也可以通过 `DEER_FLOW_CONFIG_PATH` 覆盖。
1. **检查依赖环境**:
```bash
make check # 校验 Node.js 22+、pnpm、uv、nginx
```
2. **安装依赖**:
```bash
make install # 安装 backend + frontend 依赖
```
3. **(可选)预拉取 sandbox 镜像**:
```bash
# 如果使用 Docker / Container sandbox,建议先执行
make setup-sandbox
```
4. **启动服务**:
```bash
make dev
```
5. **访问地址**:http://localhost:2026
### 进阶配置
#### Sandbox 模式
DeerFlow 支持多种 sandbox 执行方式:
- **本地执行**(直接在宿主机上运行 sandbox 代码)
- **Docker 执行**(在隔离的 Docker 容器里运行 sandbox 代码)
- **Docker + Kubernetes 执行**(通过 provisioner 服务在 Kubernetes Pod 中运行 sandbox 代码)
Docker 开发时,服务启动行为会遵循 `config.yaml` 里的 sandbox 模式。在 Local / Docker 模式下,不会启动 `provisioner`。
如果要配置你自己的模式,参见 [Sandbox 配置指南](backend/docs/CONFIGURATION.md#sandbox)。
#### MCP Server
DeerFlow 支持可配置的 MCP Server 和 skills,用来扩展能力。
对于 HTTP/SSE MCP Server,还支持 OAuth token 流程(`client_credentials`、`refresh_token`)。
详细说明见 [MCP Server 指南](backend/docs/MCP_SERVER.md)。
#### IM 渠道
DeerFlow 支持从即时通讯应用接收任务。只要配置完成,对应渠道会自动启动,而且都不需要公网 IP。
| 渠道 | 传输方式 | 上手难度 |
|---------|-----------|------------|
| Telegram | Bot API(long-polling) | 简单 |
| Slack | Socket Mode | 中等 |
| Feishu / Lark | WebSocket | 中等 |
**`config.yaml` 中的配置示例:**
```yaml
channels:
# LangGraph Server URL(默认:http://localhost:2024)
langgraph_url: http://localhost:2024
# Gateway API URL(默认:http://localhost:8001)
gateway_url: http://localhost:8001
# 可选:所有移动端渠道共用的全局 session 默认值
session:
assistant_id: lead_agent
config:
recursion_limit: 100
context:
thinking_enabled: true
is_plan_mode: false
subagent_enabled: false
feishu:
enabled: true
app_id: $FEISHU_APP_ID
app_secret: $FEISHU_APP_SECRET
slack:
enabled: true
bot_token: $SLACK_BOT_TOKEN # xoxb-...
app_token: $SLACK_APP_TOKEN # xapp-...(Socket Mode)
allowed_users: [] # 留空表示允许所有人
telegram:
enabled: true
bot_token: $TELEGRAM_BOT_TOKEN
allowed_users: [] # 留空表示允许所有人
# 可选:按渠道 / 按用户单独覆盖 session 配置
session:
assistant_id: mobile_agent
context:
thinking_enabled: false
users:
"123456789":
assistant_id: vip_agent
config:
recursion_limit: 150
context:
thinking_enabled: true
subagent_enabled: true
```
在 `.env` 里设置对应的 API key:
```bash
# Telegram
TELEGRAM_BOT_TOKEN=123456789:ABCdefGHIjklMNOpqrSTUvwxYZ
# Slack
SLACK_BOT_TOKEN=xoxb-...
SLACK_APP_TOKEN=xapp-...
# Feishu / Lark
FEISHU_APP_ID=cli_xxxx
FEISHU_APP_SECRET=your_app_secret
```
**Telegram 配置**
1. 打开 [@BotFather](https://t.me/BotFather),发送 `/newbot`,复制生成的 HTTP API token。
2. 在 `.env` 中设置 `TELEGRAM_BOT_TOKEN`,并在 `config.yaml` 里启用该渠道。
**Slack 配置**
1. 前往 [api.slack.com/apps](https://api.slack.com/apps) 创建 Slack App:Create New App → From scratch。
2. 在 **OAuth & Permissions** 中添加 Bot Token Scopes:`app_mentions:read`、`chat:write`、`im:history`、`im:read`、`im:write`、`files:write`。
3. 启用 **Socket Mode**,生成带 `connections:write` 权限的 App-Level Token(`xapp-...`)。
4. 在 **Event Subscriptions** 中订阅 bot events:`app_mention`、`message.im`。
5. 在 `.env` 中设置 `SLACK_BOT_TOKEN` 和 `SLACK_APP_TOKEN`,并在 `config.yaml` 中启用该渠道。
**Feishu / Lark 配置**
1. 在 [飞书开放平台](https://open.feishu.cn/) 创建应用,并启用 **Bot** 能力。
2. 添加权限:`im:message`、`im:message.p2p_msg:readonly`、`im:resource`。
3. 在 **事件订阅** 中订阅 `im.message.receive_v1`,连接方式选择 **长连接**。
4. 复制 App ID 和 App Secret,在 `.env` 中设置 `FEISHU_APP_ID` 和 `FEISHU_APP_SECRET`,并在 `config.yaml` 中启用该渠道。
**命令**
渠道连接完成后,你可以直接在聊天窗口里和 DeerFlow 交互:
| 命令 | 说明 |
|---------|-------------|
| `/new` | 开启新对话 |
| `/status` | 查看当前 thread 信息 |
| `/models` | 列出可用模型 |
| `/memory` | 查看 memory |
| `/help` | 查看帮助 |
> 没有命令前缀的消息会被当作普通聊天处理。DeerFlow 会自动创建 thread,并以对话方式回复。
## 从 Deep Research 到 Super Agent Harness
DeerFlow 最初是一个 Deep Research 框架,后来社区把它一路推到了更远的地方。上线之后,开发者拿它去做的事情早就不止研究:搭数据流水线、生成演示文稿、快速起 dashboard、自动化内容流程,很多方向一开始连我们自己都没想到。
这让我们意识到一件事:DeerFlow 不只是一个研究工具。它更像一个 **harness**,一个真正让 agents 把事情做完的运行时基础设施。
所以我们把它从头重做了一遍。
DeerFlow 2.0 不再是一个需要你自己拼装的 framework。它是一个开箱即用、同时又足够可扩展的 super agent harness。基于 LangGraph 和 LangChain 构建,默认就带上了 agent 真正会用到的关键能力:文件系统、memory、skills、sandbox 执行环境,以及为复杂多步骤任务做规划、拉起 sub-agents 的能力。
你可以直接拿来用,也可以拆开重组,改成你自己的样子。
## 核心特性
### Skills 与 Tools
Skills 是 DeerFlow 能做“几乎任何事”的关键。
标准的 Agent Skill 是一种结构化能力模块,通常就是一个 Markdown 文件,里面定义了工作流、最佳实践,以及相关的参考资源。DeerFlow 自带一批内置 skills,覆盖研究、报告生成、演示文稿制作、网页生成、图像和视频生成等场景。真正有意思的地方在于它的扩展性:你可以加自己的 skills,替换内置 skills,或者把多个 skills 组合成复合工作流。
Skills 采用按需渐进加载,不会一次性把所有内容都塞进上下文。只有任务确实需要时才加载,这样能把上下文窗口控制得更干净,也更适合对 token 比较敏感的模型。
通过 Gateway 安装 `.skill` 压缩包时,DeerFlow 会接受标准的可选 frontmatter 元数据,比如 `version`、`author`、`compatibility`,不会把本来合法的外部 skill 拒之门外。
Tools 也是同样的思路。DeerFlow 自带一组核心工具:网页搜索、网页抓取、文件操作、bash 执行;同时也支持通过 MCP Server 和 Python 函数扩展自定义工具。你可以替换任何一项,也可以继续往里加。
Gateway 生成后续建议时,现在会先把普通字符串输出和 block/list 风格的富文本内容统一归一化,再去解析 JSON 数组响应,因此不同 provider 的内容包装方式不会再悄悄把建议吞掉。
```text
# sandbox 容器内的路径
/mnt/skills/public
├── research/SKILL.md
├── report-generation/SKILL.md
├── slide-creation/SKILL.md
├── web-page/SKILL.md
└── image-generation/SKILL.md
/mnt/skills/custom
└── your-custom-skill/SKILL.md ← 你的 skill
```
#### Claude Code 集成
借助 `claude-to-deerflow` skill,你可以直接在 [Claude Code](https://docs.anthropic.com/en/docs/claude-code) 里和正在运行的 DeerFlow 实例交互。不用离开终端,就能下发研究任务、查看状态、管理 threads。
**安装这个 skill:**
```bash
npx skills add https://github.com/bytedance/deer-flow --skill claude-to-deerflow
```
然后确认 DeerFlow 已经启动(默认地址是 `http://localhost:2026`),在 Claude Code 里使用 `/claude-to-deerflow` 命令即可。
**你可以做的事情包括:**
- 给 DeerFlow 发送消息,并接收流式响应
- 选择执行模式:flash(更快)、standard、pro(规划模式)、ultra(sub-agents 模式)
- 检查 DeerFlow 健康状态,列出 models / skills / agents
- 管理 threads 和会话历史
- 上传文件做分析
**环境变量**(可选,用于自定义端点):
```bash
DEERFLOW_URL=http://localhost:2026 # 统一代理基地址
DEERFLOW_GATEWAY_URL=http://localhost:2026 # Gateway API
DEERFLOW_LANGGRAPH_URL=http://localhost:2026/api/langgraph # LangGraph API
```
完整 API 说明见 [`skills/public/claude-to-deerflow/SKILL.md`](skills/public/claude-to-deerflow/SKILL.md)。
### Sub-Agents
复杂任务通常不可能一次完成,DeerFlow 会先拆解,再执行。
lead agent 可以按需动态拉起 sub-agents。每个 sub-agent 都有自己独立的上下文、工具和终止条件。只要条件允许,它们就会并行运行,返回结构化结果,最后再由 lead agent 汇总成一份完整输出。
这也是 DeerFlow 能处理从几分钟到几小时任务的原因。比如一个研究任务,可以拆成十几个 sub-agents,分别探索不同方向,最后合并成一份报告,或者一个网站,或者一套带生成视觉内容的演示文稿。一个 harness,多路并行。
### Sandbox 与文件系统
DeerFlow 不只是“会说它能做”,它是真的有一台自己的“电脑”。
每个任务都运行在隔离的 Docker 容器里,里面有完整的文件系统,包括 skills、workspace、uploads、outputs。agent 可以读写和编辑文件,可以执行 bash 命令和代码,也可以查看图片。整个过程都在 sandbox 内完成,可审计、会隔离,不会在不同 session 之间互相污染。
这就是“带工具的聊天机器人”和“真正有执行环境的 agent”之间的差别。
```text
# sandbox 容器内的路径
/mnt/user-data/
├── uploads/ ← 你的文件
├── workspace/ ← agents 的工作目录
└── outputs/ ← 最终交付物
```
### Context Engineering
**隔离的 Sub-Agent Context**:每个 sub-agent 都在自己独立的上下文里运行。它看不到主 agent 的上下文,也看不到其他 sub-agents 的上下文。这样做的目的很直接,就是让它只聚焦当前任务,不被无关信息干扰。
**摘要压缩**:在单个 session 内,DeerFlow 会比较积极地管理上下文,包括总结已完成的子任务、把中间结果转存到文件系统、压缩暂时不重要的信息。这样在长链路、多步骤任务里,它也能保持聚焦,而不会轻易把上下文窗口打爆。
### 长期记忆
大多数 agents 会在对话结束后把一切都忘掉,DeerFlow 不一样。
跨 session 使用时,DeerFlow 会逐步积累关于你的持久 memory,包括你的个人偏好、知识背景,以及长期沉淀下来的工作习惯。你用得越多,它越了解你的写作风格、技术栈和重复出现的工作流。memory 保存在本地,控制权也始终在你手里。
## 推荐模型
DeerFlow 对模型没有强绑定,只要实现了 OpenAI 兼容 API 的 LLM,理论上都可以接入。不过在下面这些能力上表现更强的模型,通常会更适合 DeerFlow:
- **长上下文窗口**(100k+ tokens),适合深度研究和多步骤任务
- **推理能力**,适合自适应规划和复杂拆解
- **多模态输入**,适合理解图片和视频
- **稳定的 tool use 能力**,适合可靠的函数调用和结构化输出
## 内嵌 Python Client
DeerFlow 也可以作为内嵌的 Python 库使用,不必启动完整的 HTTP 服务。`DeerFlowClient` 提供了进程内的直接访问方式,覆盖所有 agent 和 Gateway 能力,返回的数据结构与 HTTP Gateway API 保持一致:
```python
from deerflow.client import DeerFlowClient
client = DeerFlowClient()
# Chat
response = client.chat("Analyze this paper for me", thread_id="my-thread")
# Streaming(LangGraph SSE 协议:values、messages-tuple、end)
for event in client.stream("hello"):
if event.type == "messages-tuple" and event.data.get("type") == "ai":
print(event.data["content"])
# 配置与管理:返回值与 Gateway 对齐的 dict
models = client.list_models() # {"models": [...]}
skills = client.list_skills() # {"skills": [...]}
client.update_skill("web-search", enabled=True)
client.upload_files("thread-1", ["./report.pdf"]) # {"success": True, "files": [...]}
```
所有返回 dict 的方法都会在 CI 中通过 Gateway 的 Pydantic 响应模型校验(`TestGatewayConformance`),以确保内嵌 client 始终和 HTTP API schema 保持同步。完整 API 说明见 `backend/packages/harness/deerflow/client.py`。
## 文档
- [贡献指南](CONTRIBUTING.md) - 开发环境搭建与协作流程
- [配置指南](backend/docs/CONFIGURATION.md) - 安装与配置说明
- [架构概览](backend/CLAUDE.md) - 技术架构说明
- [后端架构](backend/README.md) - 后端架构与 API 参考
## 参与贡献
欢迎参与贡献。开发环境、工作流和相关规范见 [CONTRIBUTING.md](CONTRIBUTING.md)。
目前回归测试已经覆盖 Docker sandbox 模式识别,以及 `backend/tests/` 中 provisioner kubeconfig-path 处理相关测试。
## 许可证
本项目采用 [MIT License](./LICENSE) 开源发布。
## 致谢
DeerFlow 建立在开源社区大量优秀工作的基础上。所有让 DeerFlow 成为可能的项目和贡献者,我们都心怀感谢。毫不夸张地说,我们是站在巨人的肩膀上继续往前走。
特别感谢以下项目带来的关键支持:
- **[LangChain](https://github.com/langchain-ai/langchain)**:它们提供的优秀框架支撑了我们的 LLM 交互与 chains,让整体集成和能力编排顺畅可用。
- **[LangGraph](https://github.com/langchain-ai/langgraph)**:它们在多 agent 编排上的创新方式,是 DeerFlow 复杂工作流得以成立的重要基础。
这些项目体现了开源协作真正的力量,我们也很高兴能继续建立在这些基础之上。
### 核心贡献者
感谢 `DeerFlow` 的核心作者,是他们的判断、投入和持续推进,才让这个项目真正落地:
- **[Daniel Walnut](https://github.com/hetaoBackend/)**
- **[Henry Li](https://github.com/magiccube/)**
## Star History
[](https://star-history.com/#bytedance/deer-flow&Date)
================================================
FILE: SECURITY.md
================================================
# Security Policy
## Supported Versions
As deer-flow doesn't provide an official release yet, please use the latest version to receive security updates.
Currently we maintain two branches:
* main branch for deer-flow 2.x
* main-1.x branch for deer-flow 1.x
## Reporting a Vulnerability
Please go to https://github.com/bytedance/deer-flow/security to report the vulnerability you find.
================================================
FILE: backend/.gitignore
================================================
# Python-generated files
__pycache__/
*.py[oc]
build/
dist/
wheels/
*.egg-info
.coverage
.coverage.*
.ruff_cache
agent_history.gif
static/browser_history/*.gif
log/
log/*
# Virtual environments
.venv
venv/
# User config file
config.yaml
# Langgraph
.langgraph_api
# Claude Code settings
.claude/settings.local.json
================================================
FILE: backend/.python-version
================================================
3.12
================================================
FILE: backend/AGENTS.md
================================================
For the backend architecture and design patterns:
@./CLAUDE.md
================================================
FILE: backend/CLAUDE.md
================================================
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Project Overview
DeerFlow is a LangGraph-based AI super agent system with a full-stack architecture. The backend provides a "super agent" with sandbox execution, persistent memory, subagent delegation, and extensible tool integration - all operating in per-thread isolated environments.
**Architecture**:
- **LangGraph Server** (port 2024): Agent runtime and workflow execution
- **Gateway API** (port 8001): REST API for models, MCP, skills, memory, artifacts, and uploads
- **Frontend** (port 3000): Next.js web interface
- **Nginx** (port 2026): Unified reverse proxy entry point
- **Provisioner** (port 8002, optional in Docker dev): Started only when sandbox is configured for provisioner/Kubernetes mode
**Project Structure**:
```
deer-flow/
├── Makefile # Root commands (check, install, dev, stop)
├── config.yaml # Main application configuration
├── extensions_config.json # MCP servers and skills configuration
├── backend/ # Backend application (this directory)
│ ├── Makefile # Backend-only commands (dev, gateway, lint)
│ ├── langgraph.json # LangGraph server configuration
│ ├── packages/
│ │ └── harness/ # deerflow-harness package (import: deerflow.*)
│ │ ├── pyproject.toml
│ │ └── deerflow/
│ │ ├── agents/ # LangGraph agent system
│ │ │ ├── lead_agent/ # Main agent (factory + system prompt)
│ │ │ ├── middlewares/ # 10 middleware components
│ │ │ ├── memory/ # Memory extraction, queue, prompts
│ │ │ └── thread_state.py # ThreadState schema
│ │ ├── sandbox/ # Sandbox execution system
│ │ │ ├── local/ # Local filesystem provider
│ │ │ ├── sandbox.py # Abstract Sandbox interface
│ │ │ ├── tools.py # bash, ls, read/write/str_replace
│ │ │ └── middleware.py # Sandbox lifecycle management
│ │ ├── subagents/ # Subagent delegation system
│ │ │ ├── builtins/ # general-purpose, bash agents
│ │ │ ├── executor.py # Background execution engine
│ │ │ └── registry.py # Agent registry
│ │ ├── tools/builtins/ # Built-in tools (present_files, ask_clarification, view_image)
│ │ ├── mcp/ # MCP integration (tools, cache, client)
│ │ ├── models/ # Model factory with thinking/vision support
│ │ ├── skills/ # Skills discovery, loading, parsing
│ │ ├── config/ # Configuration system (app, model, sandbox, tool, etc.)
│ │ ├── community/ # Community tools (tavily, jina_ai, firecrawl, image_search, aio_sandbox)
│ │ ├── reflection/ # Dynamic module loading (resolve_variable, resolve_class)
│ │ ├── utils/ # Utilities (network, readability)
│ │ └── client.py # Embedded Python client (DeerFlowClient)
│ ├── app/ # Application layer (import: app.*)
│ │ ├── gateway/ # FastAPI Gateway API
│ │ │ ├── app.py # FastAPI application
│ │ │ └── routers/ # 6 route modules
│ │ └── channels/ # IM platform integrations
│ ├── tests/ # Test suite
│ └── docs/ # Documentation
├── frontend/ # Next.js frontend application
└── skills/ # Agent skills directory
├── public/ # Public skills (committed)
└── custom/ # Custom skills (gitignored)
```
## Important Development Guidelines
### Documentation Update Policy
**CRITICAL: Always update README.md and CLAUDE.md after every code change**
When making code changes, you MUST update the relevant documentation:
- Update `README.md` for user-facing changes (features, setup, usage instructions)
- Update `CLAUDE.md` for development changes (architecture, commands, workflows, internal systems)
- Keep documentation synchronized with the codebase at all times
- Ensure accuracy and timeliness of all documentation
## Commands
**Root directory** (for full application):
```bash
make check # Check system requirements
make install # Install all dependencies (frontend + backend)
make dev # Start all services (LangGraph + Gateway + Frontend + Nginx), with config.yaml preflight
make stop # Stop all services
```
**Backend directory** (for backend development only):
```bash
make install # Install backend dependencies
make dev # Run LangGraph server only (port 2024)
make gateway # Run Gateway API only (port 8001)
make test # Run all backend tests
make lint # Lint with ruff
make format # Format code with ruff
```
Regression tests related to Docker/provisioner behavior:
- `tests/test_docker_sandbox_mode_detection.py` (mode detection from `config.yaml`)
- `tests/test_provisioner_kubeconfig.py` (kubeconfig file/directory handling)
Boundary check (harness → app import firewall):
- `tests/test_harness_boundary.py` — ensures `packages/harness/deerflow/` never imports from `app.*`
CI runs these regression tests for every pull request via [.github/workflows/backend-unit-tests.yml](../.github/workflows/backend-unit-tests.yml).
## Architecture
### Harness / App Split
The backend is split into two layers with a strict dependency direction:
- **Harness** (`packages/harness/deerflow/`): Publishable agent framework package (`deerflow-harness`). Import prefix: `deerflow.*`. Contains agent orchestration, tools, sandbox, models, MCP, skills, config — everything needed to build and run agents.
- **App** (`app/`): Unpublished application code. Import prefix: `app.*`. Contains the FastAPI Gateway API and IM channel integrations (Feishu, Slack, Telegram).
**Dependency rule**: App imports deerflow, but deerflow never imports app. This boundary is enforced by `tests/test_harness_boundary.py` which runs in CI.
**Import conventions**:
```python
# Harness internal
from deerflow.agents import make_lead_agent
from deerflow.models import create_chat_model
# App internal
from app.gateway.app import app
from app.channels.service import start_channel_service
# App → Harness (allowed)
from deerflow.config import get_app_config
# Harness → App (FORBIDDEN — enforced by test_harness_boundary.py)
# from app.gateway.routers.uploads import ... # ← will fail CI
```
### Agent System
**Lead Agent** (`packages/harness/deerflow/agents/lead_agent/agent.py`):
- Entry point: `make_lead_agent(config: RunnableConfig)` registered in `langgraph.json`
- Dynamic model selection via `create_chat_model()` with thinking/vision support
- Tools loaded via `get_available_tools()` - combines sandbox, built-in, MCP, community, and subagent tools
- System prompt generated by `apply_prompt_template()` with skills, memory, and subagent instructions
**ThreadState** (`packages/harness/deerflow/agents/thread_state.py`):
- Extends `AgentState` with: `sandbox`, `thread_data`, `title`, `artifacts`, `todos`, `uploaded_files`, `viewed_images`
- Uses custom reducers: `merge_artifacts` (deduplicate), `merge_viewed_images` (merge/clear)
**Runtime Configuration** (via `config.configurable`):
- `thinking_enabled` - Enable model's extended thinking
- `model_name` - Select specific LLM model
- `is_plan_mode` - Enable TodoList middleware
- `subagent_enabled` - Enable task delegation tool
### Middleware Chain
Middlewares execute in strict order in `packages/harness/deerflow/agents/lead_agent/agent.py`:
1. **ThreadDataMiddleware** - Creates per-thread directories (`backend/.deer-flow/threads/{thread_id}/user-data/{workspace,uploads,outputs}`)
2. **UploadsMiddleware** - Tracks and injects newly uploaded files into conversation
3. **SandboxMiddleware** - Acquires sandbox, stores `sandbox_id` in state
4. **DanglingToolCallMiddleware** - Injects placeholder ToolMessages for AIMessage tool_calls that lack responses (e.g., due to user interruption)
5. **SummarizationMiddleware** - Context reduction when approaching token limits (optional, if enabled)
6. **TodoListMiddleware** - Task tracking with `write_todos` tool (optional, if plan_mode)
7. **TitleMiddleware** - Auto-generates thread title after first complete exchange and normalizes structured message content before prompting the title model
8. **MemoryMiddleware** - Queues conversations for async memory update (filters to user + final AI responses)
9. **ViewImageMiddleware** - Injects base64 image data before LLM call (conditional on vision support)
10. **SubagentLimitMiddleware** - Truncates excess `task` tool calls from model response to enforce `MAX_CONCURRENT_SUBAGENTS` limit (optional, if subagent_enabled)
11. **ClarificationMiddleware** - Intercepts `ask_clarification` tool calls, interrupts via `Command(goto=END)` (must be last)
### Configuration System
**Main Configuration** (`config.yaml`):
Setup: Copy `config.example.yaml` to `config.yaml` in the **project root** directory.
**Config Versioning**: `config.example.yaml` has a `config_version` field. On startup, `AppConfig.from_file()` compares user version vs example version and emits a warning if outdated. Missing `config_version` = version 0. Run `make config-upgrade` to auto-merge missing fields. When changing the config schema, bump `config_version` in `config.example.yaml`.
**Config Caching**: `get_app_config()` caches the parsed config, but automatically reloads it when the resolved config path changes or the file's mtime increases. This keeps Gateway and LangGraph reads aligned with `config.yaml` edits without requiring a manual process restart.
Configuration priority:
1. Explicit `config_path` argument
2. `DEER_FLOW_CONFIG_PATH` environment variable
3. `config.yaml` in current directory (backend/)
4. `config.yaml` in parent directory (project root - **recommended location**)
Config values starting with `$` are resolved as environment variables (e.g., `$OPENAI_API_KEY`).
`ModelConfig` also declares `use_responses_api` and `output_version` so OpenAI `/v1/responses` can be enabled explicitly while still using `langchain_openai:ChatOpenAI`.
**Extensions Configuration** (`extensions_config.json`):
MCP servers and skills are configured together in `extensions_config.json` in project root:
Configuration priority:
1. Explicit `config_path` argument
2. `DEER_FLOW_EXTENSIONS_CONFIG_PATH` environment variable
3. `extensions_config.json` in current directory (backend/)
4. `extensions_config.json` in parent directory (project root - **recommended location**)
### Gateway API (`app/gateway/`)
FastAPI application on port 8001 with health check at `GET /health`.
**Routers**:
| Router | Endpoints |
|--------|-----------|
| **Models** (`/api/models`) | `GET /` - list models; `GET /{name}` - model details |
| **MCP** (`/api/mcp`) | `GET /config` - get config; `PUT /config` - update config (saves to extensions_config.json) |
| **Skills** (`/api/skills`) | `GET /` - list skills; `GET /{name}` - details; `PUT /{name}` - update enabled; `POST /install` - install from .skill archive (accepts standard optional frontmatter like `version`, `author`, `compatibility`) |
| **Memory** (`/api/memory`) | `GET /` - memory data; `POST /reload` - force reload; `GET /config` - config; `GET /status` - config + data |
| **Uploads** (`/api/threads/{id}/uploads`) | `POST /` - upload files (auto-converts PDF/PPT/Excel/Word); `GET /list` - list; `DELETE /{filename}` - delete |
| **Artifacts** (`/api/threads/{id}/artifacts`) | `GET /{path}` - serve artifacts; `?download=true` for file download |
| **Suggestions** (`/api/threads/{id}/suggestions`) | `POST /` - generate follow-up questions; rich list/block model content is normalized before JSON parsing |
Proxied through nginx: `/api/langgraph/*` → LangGraph, all other `/api/*` → Gateway.
### Sandbox System (`packages/harness/deerflow/sandbox/`)
**Interface**: Abstract `Sandbox` with `execute_command`, `read_file`, `write_file`, `list_dir`
**Provider Pattern**: `SandboxProvider` with `acquire`, `get`, `release` lifecycle
**Implementations**:
- `LocalSandboxProvider` - Singleton local filesystem execution with path mappings
- `AioSandboxProvider` (`packages/harness/deerflow/community/`) - Docker-based isolation
**Virtual Path System**:
- Agent sees: `/mnt/user-data/{workspace,uploads,outputs}`, `/mnt/skills`
- Physical: `backend/.deer-flow/threads/{thread_id}/user-data/...`, `deer-flow/skills/`
- Translation: `replace_virtual_path()` / `replace_virtual_paths_in_command()`
- Detection: `is_local_sandbox()` checks `sandbox_id == "local"`
**Sandbox Tools** (in `packages/harness/deerflow/sandbox/tools.py`):
- `bash` - Execute commands with path translation and error handling
- `ls` - Directory listing (tree format, max 2 levels)
- `read_file` - Read file contents with optional line range
- `write_file` - Write/append to files, creates directories
- `str_replace` - Substring replacement (single or all occurrences)
### Subagent System (`packages/harness/deerflow/subagents/`)
**Built-in Agents**: `general-purpose` (all tools except `task`) and `bash` (command specialist)
**Execution**: Dual thread pool - `_scheduler_pool` (3 workers) + `_execution_pool` (3 workers)
**Concurrency**: `MAX_CONCURRENT_SUBAGENTS = 3` enforced by `SubagentLimitMiddleware` (truncates excess tool calls in `after_model`), 15-minute timeout
**Flow**: `task()` tool → `SubagentExecutor` → background thread → poll 5s → SSE events → result
**Events**: `task_started`, `task_running`, `task_completed`/`task_failed`/`task_timed_out`
### Tool System (`packages/harness/deerflow/tools/`)
`get_available_tools(groups, include_mcp, model_name, subagent_enabled)` assembles:
1. **Config-defined tools** - Resolved from `config.yaml` via `resolve_variable()`
2. **MCP tools** - From enabled MCP servers (lazy initialized, cached with mtime invalidation)
3. **Built-in tools**:
- `present_files` - Make output files visible to user (only `/mnt/user-data/outputs`)
- `ask_clarification` - Request clarification (intercepted by ClarificationMiddleware → interrupts)
- `view_image` - Read image as base64 (added only if model supports vision)
4. **Subagent tool** (if enabled):
- `task` - Delegate to subagent (description, prompt, subagent_type, max_turns)
**Community tools** (`packages/harness/deerflow/community/`):
- `tavily/` - Web search (5 results default) and web fetch (4KB limit)
- `jina_ai/` - Web fetch via Jina reader API with readability extraction
- `firecrawl/` - Web scraping via Firecrawl API
- `image_search/` - Image search via DuckDuckGo
### MCP System (`packages/harness/deerflow/mcp/`)
- Uses `langchain-mcp-adapters` `MultiServerMCPClient` for multi-server management
- **Lazy initialization**: Tools loaded on first use via `get_cached_mcp_tools()`
- **Cache invalidation**: Detects config file changes via mtime comparison
- **Transports**: stdio (command-based), SSE, HTTP
- **OAuth (HTTP/SSE)**: Supports token endpoint flows (`client_credentials`, `refresh_token`) with automatic token refresh + Authorization header injection
- **Runtime updates**: Gateway API saves to extensions_config.json; LangGraph detects via mtime
### Skills System (`packages/harness/deerflow/skills/`)
- **Location**: `deer-flow/skills/{public,custom}/`
- **Format**: Directory with `SKILL.md` (YAML frontmatter: name, description, license, allowed-tools)
- **Loading**: `load_skills()` recursively scans `skills/{public,custom}` for `SKILL.md`, parses metadata, and reads enabled state from extensions_config.json
- **Injection**: Enabled skills listed in agent system prompt with container paths
- **Installation**: `POST /api/skills/install` extracts .skill ZIP archive to custom/ directory
### Model Factory (`packages/harness/deerflow/models/factory.py`)
- `create_chat_model(name, thinking_enabled)` instantiates LLM from config via reflection
- Supports `thinking_enabled` flag with per-model `when_thinking_enabled` overrides
- Supports `supports_vision` flag for image understanding models
- Config values starting with `$` resolved as environment variables
- Missing provider modules surface actionable install hints from reflection resolvers (for example `uv add langchain-google-genai`)
### IM Channels System (`app/channels/`)
Bridges external messaging platforms (Feishu, Slack, Telegram) to the DeerFlow agent via the LangGraph Server.
**Architecture**: Channels communicate with the LangGraph Server through `langgraph-sdk` HTTP client (same as the frontend), ensuring threads are created and managed server-side.
**Components**:
- `message_bus.py` - Async pub/sub hub (`InboundMessage` → queue → dispatcher; `OutboundMessage` → callbacks → channels)
- `store.py` - JSON-file persistence mapping `channel_name:chat_id[:topic_id]` → `thread_id` (keys are `channel:chat` for root conversations and `channel:chat:topic` for threaded conversations)
- `manager.py` - Core dispatcher: creates threads via `client.threads.create()`, routes commands, keeps Slack/Telegram on `client.runs.wait()`, and uses `client.runs.stream(["messages-tuple", "values"])` for Feishu incremental outbound updates
- `base.py` - Abstract `Channel` base class (start/stop/send lifecycle)
- `service.py` - Manages lifecycle of all configured channels from `config.yaml`
- `slack.py` / `feishu.py` / `telegram.py` - Platform-specific implementations (`feishu.py` tracks the running card `message_id` in memory and patches the same card in place)
**Message Flow**:
1. External platform -> Channel impl -> `MessageBus.publish_inbound()`
2. `ChannelManager._dispatch_loop()` consumes from queue
3. For chat: look up/create thread on LangGraph Server
4. Feishu chat: `runs.stream()` → accumulate AI text → publish multiple outbound updates (`is_final=False`) → publish final outbound (`is_final=True`)
5. Slack/Telegram chat: `runs.wait()` → extract final response → publish outbound
6. Feishu channel sends one running reply card up front, then patches the same card for each outbound update (card JSON sets `config.update_multi=true` for Feishu's patch API requirement)
7. For commands (`/new`, `/status`, `/models`, `/memory`, `/help`): handle locally or query Gateway API
8. Outbound → channel callbacks → platform reply
**Configuration** (`config.yaml` -> `channels`):
- `langgraph_url` - LangGraph Server URL (default: `http://localhost:2024`)
- `gateway_url` - Gateway API URL for auxiliary commands (default: `http://localhost:8001`)
- Per-channel configs: `feishu` (app_id, app_secret), `slack` (bot_token, app_token), `telegram` (bot_token)
### Memory System (`packages/harness/deerflow/agents/memory/`)
**Components**:
- `updater.py` - LLM-based memory updates with fact extraction, whitespace-normalized fact deduplication (trims leading/trailing whitespace before comparing), and atomic file I/O
- `queue.py` - Debounced update queue (per-thread deduplication, configurable wait time)
- `prompt.py` - Prompt templates for memory updates
**Data Structure** (stored in `backend/.deer-flow/memory.json`):
- **User Context**: `workContext`, `personalContext`, `topOfMind` (1-3 sentence summaries)
- **History**: `recentMonths`, `earlierContext`, `longTermBackground`
- **Facts**: Discrete facts with `id`, `content`, `category` (preference/knowledge/context/behavior/goal), `confidence` (0-1), `createdAt`, `source`
**Workflow**:
1. `MemoryMiddleware` filters messages (user inputs + final AI responses) and queues conversation
2. Queue debounces (30s default), batches updates, deduplicates per-thread
3. Background thread invokes LLM to extract context updates and facts
4. Applies updates atomically (temp file + rename) with cache invalidation, skipping duplicate fact content before append
5. Next interaction injects top 15 facts + context into `<memory>` tags in system prompt
Focused regression coverage for the updater lives in `backend/tests/test_memory_updater.py`.
**Configuration** (`config.yaml` → `memory`):
- `enabled` / `injection_enabled` - Master switches
- `storage_path` - Path to memory.json
- `debounce_seconds` - Wait time before processing (default: 30)
- `model_name` - LLM for updates (null = default model)
- `max_facts` / `fact_confidence_threshold` - Fact storage limits (100 / 0.7)
- `max_injection_tokens` - Token limit for prompt injection (2000)
### Reflection System (`packages/harness/deerflow/reflection/`)
- `resolve_variable(path)` - Import module and return variable (e.g., `module.path:variable_name`)
- `resolve_class(path, base_class)` - Import and validate class against base class
### Config Schema
**`config.yaml`** key sections:
- `models[]` - LLM configs with `use` class path, `supports_thinking`, `supports_vision`, provider-specific fields
- `tools[]` - Tool configs with `use` variable path and `group`
- `tool_groups[]` - Logical groupings for tools
- `sandbox.use` - Sandbox provider class path
- `skills.path` / `skills.container_path` - Host and container paths to skills directory
- `title` - Auto-title generation (enabled, max_words, max_chars, prompt_template)
- `summarization` - Context summarization (enabled, trigger conditions, keep policy)
- `subagents.enabled` - Master switch for subagent delegation
- `memory` - Memory system (enabled, storage_path, debounce_seconds, model_name, max_facts, fact_confidence_threshold, injection_enabled, max_injection_tokens)
**`extensions_config.json`**:
- `mcpServers` - Map of server name → config (enabled, type, command, args, env, url, headers, oauth, description)
- `skills` - Map of skill name → state (enabled)
Both can be modified at runtime via Gateway API endpoints or `DeerFlowClient` methods.
### Embedded Client (`packages/harness/deerflow/client.py`)
`DeerFlowClient` provides direct in-process access to all DeerFlow capabilities without HTTP services. All return types align with the Gateway API response schemas, so consumer code works identically in HTTP and embedded modes.
**Architecture**: Imports the same `deerflow` modules that LangGraph Server and Gateway API use. Shares the same config files and data directories. No FastAPI dependency.
**Agent Conversation** (replaces LangGraph Server):
- `chat(message, thread_id)` — synchronous, returns final text
- `stream(message, thread_id)` — yields `StreamEvent` aligned with LangGraph SSE protocol:
- `"values"` — full state snapshot (title, messages, artifacts)
- `"messages-tuple"` — per-message update (AI text, tool calls, tool results)
- `"end"` — stream finished
- Agent created lazily via `create_agent()` + `_build_middlewares()`, same as `make_lead_agent`
- Supports `checkpointer` parameter for state persistence across turns
- `reset_agent()` forces agent recreation (e.g. after memory or skill changes)
**Gateway Equivalent Methods** (replaces Gateway API):
| Category | Methods | Return format |
|----------|---------|---------------|
| Models | `list_models()`, `get_model(name)` | `{"models": [...]}`, `{name, display_name, ...}` |
| MCP | `get_mcp_config()`, `update_mcp_config(servers)` | `{"mcp_servers": {...}}` |
| Skills | `list_skills()`, `get_skill(name)`, `update_skill(name, enabled)`, `install_skill(path)` | `{"skills": [...]}` |
| Memory | `get_memory()`, `reload_memory()`, `get_memory_config()`, `get_memory_status()` | dict |
| Uploads | `upload_files(thread_id, files)`, `list_uploads(thread_id)`, `delete_upload(thread_id, filename)` | `{"success": true, "files": [...]}`, `{"files": [...], "count": N}` |
| Artifacts | `get_artifact(thread_id, path)` → `(bytes, mime_type)` | tuple |
**Key difference from Gateway**: Upload accepts local `Path` objects instead of HTTP `UploadFile`, rejects directory paths before copying, and reuses a single worker when document conversion must run inside an active event loop. Artifact returns `(bytes, mime_type)` instead of HTTP Response. `update_mcp_config()` and `update_skill()` automatically invalidate the cached agent.
**Tests**: `tests/test_client.py` (77 unit tests including `TestGatewayConformance`), `tests/test_client_live.py` (live integration tests, requires config.yaml)
**Gateway Conformance Tests** (`TestGatewayConformance`): Validate that every dict-returning client method conforms to the corresponding Gateway Pydantic response model. Each test parses the client output through the Gateway model — if Gateway adds a required field that the client doesn't provide, Pydantic raises `ValidationError` and CI catches the drift. Covers: `ModelsListResponse`, `ModelResponse`, `SkillsListResponse`, `SkillResponse`, `SkillInstallResponse`, `McpConfigResponse`, `UploadResponse`, `MemoryConfigResponse`, `MemoryStatusResponse`.
## Development Workflow
### Test-Driven Development (TDD) — MANDATORY
**Every new feature or bug fix MUST be accompanied by unit tests. No exceptions.**
- Write tests in `backend/tests/` following the existing naming convention `test_<feature>.py`
- Run the full suite before and after your change: `make test`
- Tests must pass before a feature is considered complete
- For lightweight config/utility modules, prefer pure unit tests with no external dependencies
- If a module causes circular import issues in tests, add a `sys.modules` mock in `tests/conftest.py` (see existing example for `deerflow.subagents.executor`)
```bash
# Run all tests
make test
# Run a specific test file
PYTHONPATH=. uv run pytest tests/test_<feature>.py -v
```
### Running the Full Application
From the **project root** directory:
```bash
make dev
```
This starts all services and makes the application available at `http://localhost:2026`.
**Nginx routing**:
- `/api/langgraph/*` → LangGraph Server (2024)
- `/api/*` (other) → Gateway API (8001)
- `/` (non-API) → Frontend (3000)
### Running Backend Services Separately
From the **backend** directory:
```bash
# Terminal 1: LangGraph server
make dev
# Terminal 2: Gateway API
make gateway
```
Direct access (without nginx):
- LangGraph: `http://localhost:2024`
- Gateway: `http://localhost:8001`
### Frontend Configuration
The frontend uses environment variables to connect to backend services:
- `NEXT_PUBLIC_LANGGRAPH_BASE_URL` - Defaults to `/api/langgraph` (through nginx)
- `NEXT_PUBLIC_BACKEND_BASE_URL` - Defaults to empty string (through nginx)
When using `make dev` from root, the frontend automatically connects through nginx.
## Key Features
### File Upload
Multi-file upload with automatic document conversion:
- Endpoint: `POST /api/threads/{thread_id}/uploads`
- Supports: PDF, PPT, Excel, Word documents (converted via `markitdown`)
- Rejects directory inputs before copying so uploads stay all-or-nothing
- Reuses one conversion worker per request when called from an active event loop
- Files stored in thread-isolated directories
- Agent receives uploaded file list via `UploadsMiddleware`
See [docs/FILE_UPLOAD.md](docs/FILE_UPLOAD.md) for details.
### Plan Mode
TodoList middleware for complex multi-step tasks:
- Controlled via runtime config: `config.configurable.is_plan_mode = True`
- Provides `write_todos` tool for task tracking
- One task in_progress at a time, real-time updates
See [docs/plan_mode_usage.md](docs/plan_mode_usage.md) for details.
### Context Summarization
Automatic conversation summarization when approaching token limits:
- Configured in `config.yaml` under `summarization` key
- Trigger types: tokens, messages, or fraction of max input
- Keeps recent messages while summarizing older ones
See [docs/summarization.md](docs/summarization.md) for details.
### Vision Support
For models with `supports_vision: true`:
- `ViewImageMiddleware` processes images in conversation
- `view_image_tool` added to agent's toolset
- Images automatically converted to base64 and injected into state
## Code Style
- Uses `ruff` for linting and formatting
- Line length: 240 characters
- Python 3.12+ with type hints
- Double quotes, space indentation
## Documentation
See `docs/` directory for detailed documentation:
- [CONFIGURATION.md](docs/CONFIGURATION.md) - Configuration options
- [ARCHITECTURE.md](docs/ARCHITECTURE.md) - Architecture details
- [API.md](docs/API.md) - API reference
- [SETUP.md](docs/SETUP.md) - Setup guide
- [FILE_UPLOAD.md](docs/FILE_UPLOAD.md) - File upload feature
- [PATH_EXAMPLES.md](docs/PATH_EXAMPLES.md) - Path types and usage
- [summarization.md](docs/summarization.md) - Context summarization
- [plan_mode_usage.md](docs/plan_mode_usage.md) - Plan mode with TodoList
================================================
FILE: backend/CONTRIBUTING.md
================================================
# Contributing to DeerFlow Backend
Thank you for your interest in contributing to DeerFlow! This document provides guidelines and instructions for contributing to the backend codebase.
## Table of Contents
- [Getting Started](#getting-started)
- [Development Setup](#development-setup)
- [Project Structure](#project-structure)
- [Code Style](#code-style)
- [Making Changes](#making-changes)
- [Testing](#testing)
- [Pull Request Process](#pull-request-process)
- [Architecture Guidelines](#architecture-guidelines)
## Getting Started
### Prerequisites
- Python 3.12 or higher
- [uv](https://docs.astral.sh/uv/) package manager
- Git
- Docker (optional, for Docker sandbox testing)
### Fork and Clone
1. Fork the repository on GitHub
2. Clone your fork locally:
```bash
git clone https://github.com/YOUR_USERNAME/deer-flow.git
cd deer-flow
```
## Development Setup
### Install Dependencies
```bash
# From project root
cp config.example.yaml config.yaml
# Install backend dependencies
cd backend
make install
```
### Configure Environment
Set up your API keys for testing:
```bash
export OPENAI_API_KEY="your-api-key"
# Add other keys as needed
```
### Run the Development Server
```bash
# Terminal 1: LangGraph server
make dev
# Terminal 2: Gateway API
make gateway
```
## Project Structure
```
backend/src/
├── agents/ # Agent system
│ ├── lead_agent/ # Main agent implementation
│ │ └── agent.py # Agent factory and creation
│ ├── middlewares/ # Agent middlewares
│ │ ├── thread_data_middleware.py
│ │ ├── sandbox_middleware.py
│ │ ├── title_middleware.py
│ │ ├── uploads_middleware.py
│ │ ├── view_image_middleware.py
│ │ └── clarification_middleware.py
│ └── thread_state.py # Thread state definition
│
├── gateway/ # FastAPI Gateway
│ ├── app.py # FastAPI application
│ └── routers/ # Route handlers
│ ├── models.py # /api/models endpoints
│ ├── mcp.py # /api/mcp endpoints
│ ├── skills.py # /api/skills endpoints
│ ├── artifacts.py # /api/threads/.../artifacts
│ └── uploads.py # /api/threads/.../uploads
│
├── sandbox/ # Sandbox execution
│ ├── __init__.py # Sandbox interface
│ ├── local.py # Local sandbox provider
│ └── tools.py # Sandbox tools (bash, file ops)
│
├── tools/ # Agent tools
│ └── builtins/ # Built-in tools
│ ├── present_file_tool.py
│ ├── ask_clarification_tool.py
│ └── view_image_tool.py
│
├── mcp/ # MCP integration
│ └── manager.py # MCP server management
│
├── models/ # Model system
│ └── factory.py # Model factory
│
├── skills/ # Skills system
│ └── loader.py # Skills loader
│
├── config/ # Configuration
│ ├── app_config.py # Main app config
│ ├── extensions_config.py # Extensions config
│ └── summarization_config.py
│
├── community/ # Community tools
│ ├── tavily/ # Tavily web search
│ ├── jina/ # Jina web fetch
│ ├── firecrawl/ # Firecrawl scraping
│ └── aio_sandbox/ # Docker sandbox
│
├── reflection/ # Dynamic loading
│ └── __init__.py # Module resolution
│
└── utils/ # Utilities
└── __init__.py
```
## Code Style
### Linting and Formatting
We use `ruff` for both linting and formatting:
```bash
# Check for issues
make lint
# Auto-fix and format
make format
```
### Style Guidelines
- **Line length**: 240 characters maximum
- **Python version**: 3.12+ features allowed
- **Type hints**: Use type hints for function signatures
- **Quotes**: Double quotes for strings
- **Indentation**: 4 spaces (no tabs)
- **Imports**: Group by standard library, third-party, local
### Docstrings
Use docstrings for public functions and classes:
```python
def create_chat_model(name: str, thinking_enabled: bool = False) -> BaseChatModel:
"""Create a chat model instance from configuration.
Args:
name: The model name as defined in config.yaml
thinking_enabled: Whether to enable extended thinking
Returns:
A configured LangChain chat model instance
Raises:
ValueError: If the model name is not found in configuration
"""
...
```
## Making Changes
### Branch Naming
Use descriptive branch names:
- `feature/add-new-tool` - New features
- `fix/sandbox-timeout` - Bug fixes
- `docs/update-readme` - Documentation
- `refactor/config-system` - Code refactoring
### Commit Messages
Write clear, concise commit messages:
```
feat: add support for Claude 3.5 model
- Add model configuration in config.yaml
- Update model factory to handle Claude-specific settings
- Add tests for new model
```
Prefix types:
- `feat:` - New feature
- `fix:` - Bug fix
- `docs:` - Documentation
- `refactor:` - Code refactoring
- `test:` - Tests
- `chore:` - Build/config changes
## Testing
### Running Tests
```bash
uv run pytest
```
### Writing Tests
Place tests in the `tests/` directory mirroring the source structure:
```
tests/
├── test_models/
│ └── test_factory.py
├── test_sandbox/
│ └── test_local.py
└── test_gateway/
└── test_models_router.py
```
Example test:
```python
import pytest
from deerflow.models.factory import create_chat_model
def test_create_chat_model_with_valid_name():
"""Test that a valid model name creates a model instance."""
model = create_chat_model("gpt-4")
assert model is not None
def test_create_chat_model_with_invalid_name():
"""Test that an invalid model name raises ValueError."""
with pytest.raises(ValueError):
create_chat_model("nonexistent-model")
```
## Pull Request Process
### Before Submitting
1. **Ensure tests pass**: `uv run pytest`
2. **Run linter**: `make lint`
3. **Format code**: `make format`
4. **Update documentation** if needed
### PR Description
Include in your PR description:
- **What**: Brief description of changes
- **Why**: Motivation for the change
- **How**: Implementation approach
- **Testing**: How you tested the changes
### Review Process
1. Submit PR with clear description
2. Address review feedback
3. Ensure CI passes
4. Maintainer will merge when approved
## Architecture Guidelines
### Adding New Tools
1. Create tool in `packages/harness/deerflow/tools/builtins/` or `packages/harness/deerflow/community/`:
```python
# packages/harness/deerflow/tools/builtins/my_tool.py
from langchain_core.tools import tool
@tool
def my_tool(param: str) -> str:
"""Tool description for the agent.
Args:
param: Description of the parameter
Returns:
Description of return value
"""
return f"Result: {param}"
```
2. Register in `config.yaml`:
```yaml
tools:
- name: my_tool
group: my_group
use: deerflow.tools.builtins.my_tool:my_tool
```
### Adding New Middleware
1. Create middleware in `packages/harness/deerflow/agents/middlewares/`:
```python
# packages/harness/deerflow/agents/middlewares/my_middleware.py
from langchain.agents.middleware import BaseMiddleware
from langchain_core.runnables import RunnableConfig
class MyMiddleware(BaseMiddleware):
"""Middleware description."""
def transform_state(self, state: dict, config: RunnableConfig) -> dict:
"""Transform the state before agent execution."""
# Modify state as needed
return state
```
2. Register in `packages/harness/deerflow/agents/lead_agent/agent.py`:
```python
middlewares = [
ThreadDataMiddleware(),
SandboxMiddleware(),
MyMiddleware(), # Add your middleware
TitleMiddleware(),
ClarificationMiddleware(),
]
```
### Adding New API Endpoints
1. Create router in `app/gateway/routers/`:
```python
# app/gateway/routers/my_router.py
from fastapi import APIRouter
router = APIRouter(prefix="/my-endpoint", tags=["my-endpoint"])
@router.get("/")
async def get_items():
"""Get all items."""
return {"items": []}
@router.post("/")
async def create_item(data: dict):
"""Create a new item."""
return {"created": data}
```
2. Register in `app/gateway/app.py`:
```python
from app.gateway.routers import my_router
app.include_router(my_router.router)
```
### Configuration Changes
When adding new configuration options:
1. Update `packages/harness/deerflow/config/app_config.py` with new fields
2. Add default values in `config.example.yaml`
3. Document in `docs/CONFIGURATION.md`
### MCP Server Integration
To add support for a new MCP server:
1. Add configuration in `extensions_config.json`:
```json
{
"mcpServers": {
"my-server": {
"enabled": true,
"type": "stdio",
"command": "npx",
"args": ["-y", "@my-org/mcp-server"],
"description": "My MCP Server"
}
}
}
```
2. Update `extensions_config.example.json` with the new server
### Skills Development
To create a new skill:
1. Create directory in `skills/public/` or `skills/custom/`:
```
skills/public/my-skill/
└── SKILL.md
```
2. Write `SKILL.md` with YAML front matter:
```markdown
---
name: My Skill
description: What this skill does
license: MIT
allowed-tools:
- read_file
- write_file
- bash
---
# My Skill
Instructions for the agent when this skill is enabled...
```
## Questions?
If you have questions about contributing:
1. Check existing documentation in `docs/`
2. Look for similar issues or PRs on GitHub
3. Open a discussion or issue on GitHub
Thank you for contributing to DeerFlow!
================================================
FILE: backend/Dockerfile
================================================
# Backend Development Dockerfile
FROM python:3.12-slim
# Node.js major version to install (provides npx for stdio MCP servers).
ARG NODE_MAJOR=22
# Install system dependencies + Node.js (provides npx for MCP servers)
RUN apt-get update && apt-get install -y \
curl \
build-essential \
gnupg \
ca-certificates \
&& mkdir -p /etc/apt/keyrings \
&& curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key -o /etc/apt/keyrings/nodesource.gpg \
&& echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_${NODE_MAJOR}.x nodistro main" > /etc/apt/sources.list.d/nodesource.list \
&& apt-get update \
&& apt-get install -y nodejs \
&& rm -rf /var/lib/apt/lists/*
# Install Docker CLI (for DooD: allows starting sandbox containers via host Docker socket)
COPY --from=docker:cli /usr/local/bin/docker /usr/local/bin/docker
# Install uv from a pinned versioned image (avoids curl|sh from untrusted remote)
COPY --from=ghcr.io/astral-sh/uv:0.7.20 /uv /uvx /usr/local/bin/
# Set working directory
WORKDIR /app
# Copy backend source code
COPY backend ./backend
# Install dependencies with cache mount
RUN --mount=type=cache,target=/root/.cache/uv \
sh -c "cd backend && uv sync"
# Expose ports (gateway: 8001, langgraph: 2024)
EXPOSE 8001 2024
# Default command (can be overridden in docker-compose)
CMD ["sh", "-c", "cd backend && PYTHONPATH=. uv run uvicorn app.gateway.app:app --host 0.0.0.0 --port 8001"]
================================================
FILE: backend/Makefile
================================================
# Install backend dependencies via uv.
install:
	uv sync
# Run the LangGraph dev server (port 2024).
dev:
	uv run langgraph dev --no-browser --allow-blocking --no-reload
# Run the FastAPI gateway (port 8001).
gateway:
	PYTHONPATH=. uv run uvicorn app.gateway.app:app --host 0.0.0.0 --port 8001
# Run the test suite verbosely.
test:
	PYTHONPATH=. uv run pytest tests/ -v
# Lint with ruff (no changes).
lint:
	uvx ruff check .
# Auto-fix lint issues and apply formatting.
format:
	uvx ruff check . --fix && uvx ruff format .
================================================
FILE: backend/README.md
================================================
# DeerFlow Backend
DeerFlow is a LangGraph-based AI super agent with sandbox execution, persistent memory, and extensible tool integration. The backend enables AI agents to execute code, browse the web, manage files, delegate tasks to subagents, and retain context across conversations - all in isolated, per-thread environments.
---
## Architecture
```
┌──────────────────────────────────────┐
│ Nginx (Port 2026) │
│ Unified reverse proxy │
└───────┬──────────────────┬───────────┘
│ │
/api/langgraph/* │ │ /api/* (other)
▼ ▼
┌────────────────────┐ ┌────────────────────────┐
│ LangGraph Server │ │ Gateway API (8001) │
│ (Port 2024) │ │ FastAPI REST │
│ │ │ │
│ ┌────────────────┐ │ │ Models, MCP, Skills, │
│ │ Lead Agent │ │ │ Memory, Uploads, │
│ │ ┌──────────┐ │ │ │ Artifacts │
│ │ │Middleware│ │ │ └────────────────────────┘
│ │ │ Chain │ │ │
│ │ └──────────┘ │ │
│ │ ┌──────────┐ │ │
│ │ │ Tools │ │ │
│ │ └──────────┘ │ │
│ │ ┌──────────┐ │ │
│ │ │Subagents │ │ │
│ │ └──────────┘ │ │
│ └────────────────┘ │
└────────────────────┘
```
**Request Routing** (via Nginx):
- `/api/langgraph/*` → LangGraph Server - agent interactions, threads, streaming
- `/api/*` (other) → Gateway API - models, MCP, skills, memory, artifacts, uploads
- `/` (non-API) → Frontend - Next.js web interface
---
## Core Components
### Lead Agent
The single LangGraph agent (`lead_agent`) is the runtime entry point, created via `make_lead_agent(config)`. It combines:
- **Dynamic model selection** with thinking and vision support
- **Middleware chain** for cross-cutting concerns (9 middlewares)
- **Tool system** with sandbox, MCP, community, and built-in tools
- **Subagent delegation** for parallel task execution
- **System prompt** with skills injection, memory context, and working directory guidance
### Middleware Chain
Middlewares execute in strict order, each handling a specific concern:
| # | Middleware | Purpose |
|---|-----------|---------|
| 1 | **ThreadDataMiddleware** | Creates per-thread isolated directories (workspace, uploads, outputs) |
| 2 | **UploadsMiddleware** | Injects newly uploaded files into conversation context |
| 3 | **SandboxMiddleware** | Acquires sandbox environment for code execution |
| 4 | **SummarizationMiddleware** | Reduces context when approaching token limits (optional) |
| 5 | **TodoListMiddleware** | Tracks multi-step tasks in plan mode (optional) |
| 6 | **TitleMiddleware** | Auto-generates conversation titles after first exchange |
| 7 | **MemoryMiddleware** | Queues conversations for async memory extraction |
| 8 | **ViewImageMiddleware** | Injects image data for vision-capable models (conditional) |
| 9 | **ClarificationMiddleware** | Intercepts clarification requests and interrupts execution (must be last) |
### Sandbox System
Per-thread isolated execution with virtual path translation:
- **Abstract interface**: `execute_command`, `read_file`, `write_file`, `list_dir`
- **Providers**: `LocalSandboxProvider` (filesystem) and `AioSandboxProvider` (Docker, in community/)
- **Virtual paths**: `/mnt/user-data/{workspace,uploads,outputs}` → thread-specific physical directories
- **Skills path**: `/mnt/skills` → `deer-flow/skills/` directory
- **Skills loading**: Recursively discovers nested `SKILL.md` files under `skills/{public,custom}` and preserves nested container paths
- **Tools**: `bash`, `ls`, `read_file`, `write_file`, `str_replace`
### Subagent System
Async task delegation with concurrent execution:
- **Built-in agents**: `general-purpose` (full toolset) and `bash` (command specialist)
- **Concurrency**: Max 3 subagents per turn, 15-minute timeout
- **Execution**: Background thread pools with status tracking and SSE events
- **Flow**: Agent calls `task()` tool → executor runs subagent in background → polls for completion → returns result
### Memory System
LLM-powered persistent context retention across conversations:
- **Automatic extraction**: Analyzes conversations for user context, facts, and preferences
- **Structured storage**: User context (work, personal, top-of-mind), history, and confidence-scored facts
- **Debounced updates**: Batches updates to minimize LLM calls (configurable wait time)
- **System prompt injection**: Top facts + context injected into agent prompts
- **Storage**: JSON file with mtime-based cache invalidation
### Tool Ecosystem
| Category | Tools |
|----------|-------|
| **Sandbox** | `bash`, `ls`, `read_file`, `write_file`, `str_replace` |
| **Built-in** | `present_files`, `ask_clarification`, `view_image`, `task` (subagent) |
| **Community** | Tavily (web search), Jina AI (web fetch), Firecrawl (scraping), DuckDuckGo (image search) |
| **MCP** | Any Model Context Protocol server (stdio, SSE, HTTP transports) |
| **Skills** | Domain-specific workflows injected via system prompt |
### Gateway API
FastAPI application providing REST endpoints for frontend integration:
| Route | Purpose |
|-------|---------|
| `GET /api/models` | List available LLM models |
| `GET/PUT /api/mcp/config` | Manage MCP server configurations |
| `GET/PUT /api/skills` | List and manage skills |
| `POST /api/skills/install` | Install skill from `.skill` archive |
| `GET /api/memory` | Retrieve memory data |
| `POST /api/memory/reload` | Force memory reload |
| `GET /api/memory/config` | Memory configuration |
| `GET /api/memory/status` | Combined config + data |
| `POST /api/threads/{id}/uploads` | Upload files (auto-converts PDF/PPT/Excel/Word to Markdown, rejects directory paths) |
| `GET /api/threads/{id}/uploads/list` | List uploaded files |
| `GET /api/threads/{id}/artifacts/{path}` | Serve generated artifacts |
### IM Channels
The IM bridge supports Feishu, Slack, and Telegram. Slack and Telegram still use the final `runs.wait()` response path, while Feishu now streams through `runs.stream(["messages-tuple", "values"])` and updates a single in-thread card in place.
For Feishu card updates, DeerFlow stores the running card's `message_id` per inbound message and patches that same card until the run finishes, preserving the existing `OK` / `DONE` reaction flow.
---
## Quick Start
### Prerequisites
- Python 3.12+
- [uv](https://docs.astral.sh/uv/) package manager
- API keys for your chosen LLM provider
### Installation
```bash
cd deer-flow
# Copy configuration files
cp config.example.yaml config.yaml
# Install backend dependencies
cd backend
make install
```
### Configuration
Edit `config.yaml` in the project root:
```yaml
models:
- name: gpt-4o
display_name: GPT-4o
use: langchain_openai:ChatOpenAI
model: gpt-4o
api_key: $OPENAI_API_KEY
supports_thinking: false
supports_vision: true
- name: gpt-5-responses
display_name: GPT-5 (Responses API)
use: langchain_openai:ChatOpenAI
model: gpt-5
api_key: $OPENAI_API_KEY
use_responses_api: true
output_version: responses/v1
supports_vision: true
```
Set your API keys:
```bash
export OPENAI_API_KEY="your-api-key-here"
```
### Running
**Full Application** (from project root):
```bash
make dev # Starts LangGraph + Gateway + Frontend + Nginx
```
Access at: http://localhost:2026
**Backend Only** (from backend directory):
```bash
# Terminal 1: LangGraph server
make dev
# Terminal 2: Gateway API
make gateway
```
Direct access: LangGraph at http://localhost:2024, Gateway at http://localhost:8001
---
## Project Structure
```
backend/
├── src/
│ ├── agents/ # Agent system
│ │ ├── lead_agent/ # Main agent (factory, prompts)
│ │ ├── middlewares/ # 9 middleware components
│ │ ├── memory/ # Memory extraction & storage
│ │ └── thread_state.py # ThreadState schema
│ ├── gateway/ # FastAPI Gateway API
│ │ ├── app.py # Application setup
│ │ └── routers/ # 6 route modules
│ ├── sandbox/ # Sandbox execution
│ │ ├── local/ # Local filesystem provider
│ │ ├── sandbox.py # Abstract interface
│ │ ├── tools.py # bash, ls, read/write/str_replace
│ │ └── middleware.py # Sandbox lifecycle
│ ├── subagents/ # Subagent delegation
│ │ ├── builtins/ # general-purpose, bash agents
│ │ ├── executor.py # Background execution engine
│ │ └── registry.py # Agent registry
│ ├── tools/builtins/ # Built-in tools
│ ├── mcp/ # MCP protocol integration
│ ├── models/ # Model factory
│ ├── skills/ # Skill discovery & loading
│ ├── config/ # Configuration system
│ ├── community/ # Community tools & providers
│ ├── reflection/ # Dynamic module loading
│ └── utils/ # Utilities
├── docs/ # Documentation
├── tests/ # Test suite
├── langgraph.json # LangGraph server configuration
├── pyproject.toml # Python dependencies
├── Makefile # Development commands
└── Dockerfile # Container build
```
---
## Configuration
### Main Configuration (`config.yaml`)
Place in project root. Config values starting with `$` resolve as environment variables.
Key sections:
- `models` - LLM configurations with class paths, API keys, thinking/vision flags
- `tools` - Tool definitions with module paths and groups
- `tool_groups` - Logical tool groupings
- `sandbox` - Execution environment provider
- `skills` - Skills directory paths
- `title` - Auto-title generation settings
- `summarization` - Context summarization settings
- `subagents` - Subagent system (enabled/disabled)
- `memory` - Memory system settings (enabled, storage, debounce, facts limits)
Provider note:
- `models[*].use` references provider classes by module path (for example `langchain_openai:ChatOpenAI`).
- If a provider module is missing, DeerFlow now returns an actionable error with install guidance (for example `uv add langchain-google-genai`).
### Extensions Configuration (`extensions_config.json`)
MCP servers and skill states in a single file:
```json
{
"mcpServers": {
"github": {
"enabled": true,
"type": "stdio",
"command": "npx",
"args": ["-y", "@modelcontextprotocol/server-github"],
"env": {"GITHUB_TOKEN": "$GITHUB_TOKEN"}
},
"secure-http": {
"enabled": true,
"type": "http",
"url": "https://api.example.com/mcp",
"oauth": {
"enabled": true,
"token_url": "https://auth.example.com/oauth/token",
"grant_type": "client_credentials",
"client_id": "$MCP_OAUTH_CLIENT_ID",
"client_secret": "$MCP_OAUTH_CLIENT_SECRET"
}
}
},
"skills": {
"pdf-processing": {"enabled": true}
}
}
```
### Environment Variables
- `DEER_FLOW_CONFIG_PATH` - Override config.yaml location
- `DEER_FLOW_EXTENSIONS_CONFIG_PATH` - Override extensions_config.json location
- Model API keys: `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, `DEEPSEEK_API_KEY`, etc.
- Tool API keys: `TAVILY_API_KEY`, `GITHUB_TOKEN`, etc.
---
## Development
### Commands
```bash
make install # Install dependencies
make dev # Run LangGraph server (port 2024)
make gateway # Run Gateway API (port 8001)
make lint # Run linter (ruff)
make format # Format code (ruff)
```
### Code Style
- **Linter/Formatter**: `ruff`
- **Line length**: 240 characters
- **Python**: 3.12+ with type hints
- **Quotes**: Double quotes
- **Indentation**: 4 spaces
### Testing
```bash
uv run pytest
```
---
## Technology Stack
- **LangGraph** (1.0.6+) - Agent framework and multi-agent orchestration
- **LangChain** (1.2.3+) - LLM abstractions and tool system
- **FastAPI** (0.115.0+) - Gateway REST API
- **langchain-mcp-adapters** - Model Context Protocol support
- **agent-sandbox** - Sandboxed code execution
- **markitdown** - Multi-format document conversion
- **tavily-python** / **firecrawl-py** - Web search and scraping
---
## Documentation
- [Configuration Guide](docs/CONFIGURATION.md)
- [Architecture Details](docs/ARCHITECTURE.md)
- [API Reference](docs/API.md)
- [File Upload](docs/FILE_UPLOAD.md)
- [Path Examples](docs/PATH_EXAMPLES.md)
- [Context Summarization](docs/summarization.md)
- [Plan Mode](docs/plan_mode_usage.md)
- [Setup Guide](docs/SETUP.md)
---
## License
See the [LICENSE](../LICENSE) file in the project root.
## Contributing
See [CONTRIBUTING.md](CONTRIBUTING.md) for contribution guidelines.
================================================
FILE: backend/app/__init__.py
================================================
================================================
FILE: backend/app/channels/__init__.py
================================================
"""IM Channel integration for DeerFlow.
Provides a pluggable channel system that connects external messaging platforms
(Feishu/Lark, Slack, Telegram) to the DeerFlow agent via the ChannelManager,
which uses ``langgraph-sdk`` to communicate with the underlying LangGraph Server.
"""
from app.channels.base import Channel
from app.channels.message_bus import InboundMessage, MessageBus, OutboundMessage
__all__ = [
"Channel",
"InboundMessage",
"MessageBus",
"OutboundMessage",
]
================================================
FILE: backend/app/channels/base.py
================================================
"""Abstract base class for IM channels."""
from __future__ import annotations
import logging
from abc import ABC, abstractmethod
from typing import Any
from app.channels.message_bus import InboundMessage, InboundMessageType, MessageBus, OutboundMessage, ResolvedAttachment
logger = logging.getLogger(__name__)
class Channel(ABC):
    """Abstract base for IM channel implementations.

    A concrete channel bridges one external messaging platform and the
    internal message bus:

    1. Inbound: platform events are wrapped as ``InboundMessage`` and
       published on the bus.
    2. Outbound: the channel subscribes to the bus and delivers replies
       back to the platform.

    Subclasses must implement ``start``, ``stop``, and ``send``.
    """

    def __init__(self, name: str, bus: MessageBus, config: dict[str, Any]) -> None:
        self.name = name
        self.bus = bus
        self.config = config
        self._running = False  # toggled by start()/stop() in subclasses

    @property
    def is_running(self) -> bool:
        """Whether the channel is currently active."""
        return self._running

    # -- lifecycle ---------------------------------------------------------

    @abstractmethod
    async def start(self) -> None:
        """Start listening for messages from the external platform."""

    @abstractmethod
    async def stop(self) -> None:
        """Gracefully stop the channel."""

    # -- outbound ----------------------------------------------------------

    @abstractmethod
    async def send(self, msg: OutboundMessage) -> None:
        """Deliver a message to the external platform.

        Implementations route the reply to the right conversation/thread
        using ``msg.chat_id`` and ``msg.thread_ts``.
        """

    async def send_file(self, msg: OutboundMessage, attachment: ResolvedAttachment) -> bool:
        """Upload one file attachment to the platform.

        Returns True on success. The base implementation always returns
        False, i.e. "this channel cannot upload files".
        """
        return False

    # -- helpers -----------------------------------------------------------

    def _make_inbound(
        self,
        chat_id: str,
        user_id: str,
        text: str,
        *,
        msg_type: InboundMessageType = InboundMessageType.CHAT,
        thread_ts: str | None = None,
        files: list[dict[str, Any]] | None = None,
        metadata: dict[str, Any] | None = None,
    ) -> InboundMessage:
        """Convenience factory for creating InboundMessage instances."""
        payload: dict[str, Any] = {
            "channel_name": self.name,
            "chat_id": chat_id,
            "user_id": user_id,
            "text": text,
            "msg_type": msg_type,
            "thread_ts": thread_ts,
            "files": files if files is not None else [],
            "metadata": metadata if metadata is not None else {},
        }
        return InboundMessage(**payload)

    async def _on_outbound(self, msg: OutboundMessage) -> None:
        """Bus callback: deliver an outbound message addressed to this channel.

        The text message goes out first; attachments follow only if it
        succeeded, so we never deliver files without their accompanying text.
        """
        if msg.channel_name != self.name:
            return
        try:
            await self.send(msg)
        except Exception:
            logger.exception("Failed to send outbound message on channel %s", self.name)
            return  # Do not attempt file uploads when the text message failed
        for attachment in msg.attachments:
            try:
                if not await self.send_file(msg, attachment):
                    logger.warning("[%s] file upload skipped for %s", self.name, attachment.filename)
            except Exception:
                logger.exception("[%s] failed to upload file %s", self.name, attachment.filename)
================================================
FILE: backend/app/channels/feishu.py
================================================
"""Feishu/Lark channel — connects to Feishu via WebSocket (no public IP needed)."""
from __future__ import annotations
import asyncio
import json
import logging
import threading
from typing import Any
from app.channels.base import Channel
from app.channels.message_bus import InboundMessageType, MessageBus, OutboundMessage, ResolvedAttachment
logger = logging.getLogger(__name__)
class FeishuChannel(Channel):
"""Feishu/Lark IM channel using the ``lark-oapi`` WebSocket client.
Configuration keys (in ``config.yaml`` under ``channels.feishu``):
- ``app_id``: Feishu app ID.
- ``app_secret``: Feishu app secret.
- ``verification_token``: (optional) Event verification token.
The channel uses WebSocket long-connection mode so no public IP is required.
Message flow:
1. User sends a message → bot adds "OK" emoji reaction
2. Bot replies in thread: "Working on it......"
3. Agent processes the message and returns a result
4. Bot replies in thread with the result
5. Bot adds "DONE" emoji reaction to the original message
"""
def __init__(self, bus: MessageBus, config: dict[str, Any]) -> None:
    super().__init__(name="feishu", bus=bus, config=config)
    # WS worker thread and the main-loop handle it posts inbound work to.
    self._thread: threading.Thread | None = None
    self._main_loop: asyncio.AbstractEventLoop | None = None
    # lark-oapi API client, bound in start() once the SDK import succeeds.
    self._api_client = None
    # SDK request classes are imported lazily in start(); initialize the
    # attribute slots to None so method guards can check for them safely.
    for lazy_attr in (
        "_CreateMessageReactionRequest",
        "_CreateMessageReactionRequestBody",
        "_Emoji",
        "_PatchMessageRequest",
        "_PatchMessageRequestBody",
        "_CreateFileRequest",
        "_CreateFileRequestBody",
        "_CreateImageRequest",
        "_CreateImageRequestBody",
    ):
        setattr(self, lazy_attr, None)
    # Fire-and-forget tasks kept alive until done; cancelled on stop().
    self._background_tasks: set[asyncio.Task] = set()
    # Per-inbound-message running card ids and their updater tasks.
    self._running_card_ids: dict[str, str] = {}
    self._running_card_tasks: dict[str, asyncio.Task] = {}
async def start(self) -> None:
    """Bind the lark-oapi SDK, build the API client, and launch the WS worker.

    Idempotent: returns immediately when already running. If ``lark-oapi``
    is not installed or credentials are missing, logs an error and leaves
    the channel stopped instead of raising.
    """
    if self._running:
        return
    try:
        import lark_oapi as lark
        from lark_oapi.api.im.v1 import (
            CreateFileRequest,
            CreateFileRequestBody,
            CreateImageRequest,
            CreateImageRequestBody,
            CreateMessageReactionRequest,
            CreateMessageReactionRequestBody,
            CreateMessageRequest,
            CreateMessageRequestBody,
            Emoji,
            PatchMessageRequest,
            PatchMessageRequestBody,
            ReplyMessageRequest,
            ReplyMessageRequestBody,
        )
    except ImportError:
        logger.error("lark-oapi is not installed. Install it with: uv add lark-oapi")
        return
    # Cache the SDK module and request classes on the instance so the other
    # methods can build requests without importing lark-oapi at module level.
    self._lark = lark
    self._CreateMessageRequest = CreateMessageRequest
    self._CreateMessageRequestBody = CreateMessageRequestBody
    self._ReplyMessageRequest = ReplyMessageRequest
    self._ReplyMessageRequestBody = ReplyMessageRequestBody
    self._CreateMessageReactionRequest = CreateMessageReactionRequest
    self._CreateMessageReactionRequestBody = CreateMessageReactionRequestBody
    self._Emoji = Emoji
    self._PatchMessageRequest = PatchMessageRequest
    self._PatchMessageRequestBody = PatchMessageRequestBody
    self._CreateFileRequest = CreateFileRequest
    self._CreateFileRequestBody = CreateFileRequestBody
    self._CreateImageRequest = CreateImageRequest
    self._CreateImageRequestBody = CreateImageRequestBody
    app_id = self.config.get("app_id", "")
    app_secret = self.config.get("app_secret", "")
    if not app_id or not app_secret:
        logger.error("Feishu channel requires app_id and app_secret")
        return
    self._api_client = lark.Client.builder().app_id(app_id).app_secret(app_secret).build()
    # FIX: asyncio.get_event_loop() is deprecated inside coroutines (3.10+)
    # and is not guaranteed to return the loop we are running on.
    # get_running_loop() is, and this handle is what the WS worker thread
    # uses to post inbound messages back to the main loop.
    self._main_loop = asyncio.get_running_loop()
    self._running = True
    self.bus.subscribe_outbound(self._on_outbound)
    # Both ws.Client construction and start() must happen in a dedicated
    # thread with its own event loop. lark-oapi caches the running loop
    # at construction time and later calls loop.run_until_complete(),
    # which conflicts with an already-running uvloop.
    self._thread = threading.Thread(
        target=self._run_ws,
        args=(app_id, app_secret),
        daemon=True,
    )
    self._thread.start()
    logger.info("Feishu channel started")
def _run_ws(self, app_id: str, app_secret: str) -> None:
    """Construct and run the lark WS client in a thread with a fresh event loop.

    The lark-oapi SDK captures a module-level event loop at import time
    (``lark_oapi.ws.client.loop``). When uvicorn uses uvloop, that
    captured loop is the *main* thread's uvloop — which is already
    running, so ``loop.run_until_complete()`` inside ``Client.start()``
    raises ``RuntimeError``.

    We work around this by creating a plain asyncio event loop for this
    thread and patching the SDK's module-level reference before calling
    ``start()``.
    """
    # Fresh, not-yet-running loop owned exclusively by this worker thread.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        import lark_oapi as lark
        import lark_oapi.ws.client as _ws_client_mod
        # Replace the SDK's module-level loop so Client.start() uses
        # this thread's (non-running) event loop instead of the main
        # thread's uvloop.
        _ws_client_mod.loop = loop
        # Empty strings are the verification token/key; presumably unused in
        # WS long-connection mode — NOTE(review): confirm against lark-oapi docs.
        event_handler = lark.EventDispatcherHandler.builder("", "").register_p2_im_message_receive_v1(self._on_message).build()
        ws_client = lark.ws.Client(
            app_id=app_id,
            app_secret=app_secret,
            event_handler=event_handler,
            log_level=lark.LogLevel.INFO,
        )
        # Blocks for the lifetime of the connection; the thread exits when it returns.
        ws_client.start()
    except Exception:
        # During intentional shutdown (_running is False) errors are expected noise.
        if self._running:
            logger.exception("Feishu WebSocket error")
async def stop(self) -> None:
    """Stop the channel: unsubscribe, cancel pending tasks, join the WS thread."""
    self._running = False
    self.bus.unsubscribe_outbound(self._on_outbound)
    # Cancel every outstanding async task (fire-and-forget plus card updaters).
    pending = list(self._background_tasks) + list(self._running_card_tasks.values())
    for pending_task in pending:
        pending_task.cancel()
    self._background_tasks.clear()
    self._running_card_tasks.clear()
    worker = self._thread
    if worker:
        # Bounded join: the WS client may block; don't hang shutdown forever.
        worker.join(timeout=5)
        self._thread = None
    logger.info("Feishu channel stopped")
async def send(self, msg: OutboundMessage, *, _max_retries: int = 3) -> None:
    """Send a card reply, retrying transient failures with exponential backoff.

    Re-raises the last exception when every attempt fails.
    """
    if not self._api_client:
        logger.warning("[Feishu] send called but no api_client available")
        return
    logger.info(
        "[Feishu] sending reply: chat_id=%s, thread_ts=%s, text_len=%d",
        msg.chat_id,
        msg.thread_ts,
        len(msg.text),
    )
    last_exc: Exception | None = None
    attempt = 0
    while attempt < _max_retries:
        try:
            await self._send_card_message(msg)
            return  # success
        except Exception as exc:
            last_exc = exc
            attempt += 1
            if attempt < _max_retries:
                delay = 2 ** (attempt - 1)  # 1s, 2s
                logger.warning(
                    "[Feishu] send failed (attempt %d/%d), retrying in %ds: %s",
                    attempt,
                    _max_retries,
                    delay,
                    exc,
                )
                await asyncio.sleep(delay)
    logger.error("[Feishu] send failed after %d attempts: %s", _max_retries, last_exc)
    raise last_exc  # type: ignore[misc]
async def send_file(self, msg: OutboundMessage, attachment: ResolvedAttachment) -> bool:
    """Upload and deliver one attachment; returns True on success.

    Enforces Feishu size limits (image: 10MB, file: 30MB) and replies
    in-thread when ``msg.thread_ts`` is set, otherwise posts to the chat.
    """
    if not self._api_client:
        return False
    # Size gate first — oversized payloads are skipped, not errors.
    if attachment.is_image and attachment.size > 10 * 1024 * 1024:
        logger.warning("[Feishu] image too large (%d bytes), skipping: %s", attachment.size, attachment.filename)
        return False
    if not attachment.is_image and attachment.size > 30 * 1024 * 1024:
        logger.warning("[Feishu] file too large (%d bytes), skipping: %s", attachment.size, attachment.filename)
        return False
    try:
        # Upload to get a key, then wrap it in the matching message payload.
        if attachment.is_image:
            key = await self._upload_image(attachment.actual_path)
            msg_type, content = "image", json.dumps({"image_key": key})
        else:
            key = await self._upload_file(attachment.actual_path, attachment.filename)
            msg_type, content = "file", json.dumps({"file_key": key})
        if msg.thread_ts:
            body = self._ReplyMessageRequestBody.builder().msg_type(msg_type).content(content).reply_in_thread(True).build()
            request = self._ReplyMessageRequest.builder().message_id(msg.thread_ts).request_body(body).build()
            await asyncio.to_thread(self._api_client.im.v1.message.reply, request)
        else:
            body = self._CreateMessageRequestBody.builder().receive_id(msg.chat_id).msg_type(msg_type).content(content).build()
            request = self._CreateMessageRequest.builder().receive_id_type("chat_id").request_body(body).build()
            await asyncio.to_thread(self._api_client.im.v1.message.create, request)
        logger.info("[Feishu] file sent: %s (type=%s)", attachment.filename, msg_type)
        return True
    except Exception:
        logger.exception("[Feishu] failed to upload/send file: %s", attachment.filename)
        return False
async def _upload_image(self, path) -> str:
    """Upload an image to Feishu and return the image_key.

    The file handle stays open for the duration of the (threaded) upload
    call. Raises RuntimeError with the API error code/message on failure.
    """
    with open(str(path), "rb") as f:
        # "message" image_type: image intended for use in chat messages.
        request = self._CreateImageRequest.builder().request_body(self._CreateImageRequestBody.builder().image_type("message").image(f).build()).build()
        # The SDK call is blocking; run it off the event loop.
        response = await asyncio.to_thread(self._api_client.im.v1.image.create, request)
    if not response.success():
        raise RuntimeError(f"Feishu image upload failed: code={response.code}, msg={response.msg}")
    return response.data.image_key
async def _upload_file(self, path, filename: str) -> str:
    """Upload a file to Feishu and return the file_key.

    Feishu's file-create API requires a file_type hint; we map common
    document suffixes and fall back to the generic "stream" type.
    Raises RuntimeError with the API error code/message on failure.
    """
    # `path` may be a plain string without a .suffix attribute — hence hasattr.
    suffix = path.suffix.lower() if hasattr(path, "suffix") else ""
    if suffix in (".xls", ".xlsx", ".csv"):
        file_type = "xls"
    elif suffix in (".ppt", ".pptx"):
        file_type = "ppt"
    elif suffix == ".pdf":
        file_type = "pdf"
    elif suffix in (".doc", ".docx"):
        file_type = "doc"
    else:
        file_type = "stream"
    with open(str(path), "rb") as f:
        request = self._CreateFileRequest.builder().request_body(self._CreateFileRequestBody.builder().file_type(file_type).file_name(filename).file(f).build()).build()
        # Blocking SDK call executed off the event loop; file stays open meanwhile.
        response = await asyncio.to_thread(self._api_client.im.v1.file.create, request)
    if not response.success():
        raise RuntimeError(f"Feishu file upload failed: code={response.code}, msg={response.msg}")
    return response.data.file_key
# -- message formatting ------------------------------------------------
@staticmethod
def _build_card_content(text: str) -> str:
"""Build a Feishu interactive card with markdown content.
Feishu's interactive card format natively renders markdown, including
headers, bold/italic, code blocks, lists, and links.
"""
card = {
"config": {"wide_screen_mode": True, "update_multi": True},
"elements": [{"tag": "markdown", "content": text}],
}
return json.dumps(card)
# -- reaction helpers --------------------------------------------------
    async def _add_reaction(self, message_id: str, emoji_type: str = "THUMBSUP") -> None:
        """Add an emoji reaction to a message.

        Best-effort: failures are logged, never raised, and the call is a
        silent no-op when the SDK client/request types are unavailable.
        """
        if not self._api_client or not self._CreateMessageReactionRequest:
            return
        try:
            request = self._CreateMessageReactionRequest.builder().message_id(message_id).request_body(self._CreateMessageReactionRequestBody.builder().reaction_type(self._Emoji.builder().emoji_type(emoji_type).build()).build()).build()
            # Blocking SDK call — run off the event loop.
            await asyncio.to_thread(self._api_client.im.v1.message_reaction.create, request)
            logger.info("[Feishu] reaction '%s' added to message %s", emoji_type, message_id)
        except Exception:
            logger.exception("[Feishu] failed to add reaction '%s' to message %s", emoji_type, message_id)
    async def _reply_card(self, message_id: str, text: str) -> str | None:
        """Reply in-thread with an interactive card.

        Returns:
            The created card's message ID, or ``None`` when no client is
            configured or the SDK response carries no message ID.
        """
        if not self._api_client:
            return None
        content = self._build_card_content(text)
        request = self._ReplyMessageRequest.builder().message_id(message_id).request_body(self._ReplyMessageRequestBody.builder().msg_type("interactive").content(content).reply_in_thread(True).build()).build()
        response = await asyncio.to_thread(self._api_client.im.v1.message.reply, request)
        # Defensive getattr chain: failed calls may return a response object
        # without a populated data payload.
        response_data = getattr(response, "data", None)
        return getattr(response_data, "message_id", None)
    async def _create_card(self, chat_id: str, text: str) -> None:
        """Create a new interactive-card message in the target chat.

        No-op when no API client is configured.
        """
        if not self._api_client:
            return
        content = self._build_card_content(text)
        request = self._CreateMessageRequest.builder().receive_id_type("chat_id").request_body(self._CreateMessageRequestBody.builder().receive_id(chat_id).msg_type("interactive").content(content).build()).build()
        await asyncio.to_thread(self._api_client.im.v1.message.create, request)
    async def _update_card(self, message_id: str, text: str) -> None:
        """Patch an existing card message in place (streaming updates).

        No-op when no API client or patch request type is available.
        """
        if not self._api_client or not self._PatchMessageRequest:
            return
        content = self._build_card_content(text)
        request = self._PatchMessageRequest.builder().message_id(message_id).request_body(self._PatchMessageRequestBody.builder().content(content).build()).build()
        await asyncio.to_thread(self._api_client.im.v1.message.patch, request)
def _track_background_task(self, task: asyncio.Task, *, name: str, msg_id: str) -> None:
"""Keep a strong reference to fire-and-forget tasks and surface errors."""
self._background_tasks.add(task)
task.add_done_callback(lambda done_task, task_name=name, mid=msg_id: self._finalize_background_task(done_task, task_name, mid))
    def _finalize_background_task(self, task: asyncio.Task, name: str, msg_id: str) -> None:
        """Release the tracked task reference and log any error it raised."""
        self._background_tasks.discard(task)
        self._log_task_error(task, name, msg_id)
async def _create_running_card(self, source_message_id: str, text: str) -> str | None:
"""Create the running card and cache its message ID when available."""
running_card_id = await self._reply_card(source_message_id, text)
if running_card_id:
self._running_card_ids[source_message_id] = running_card_id
logger.info("[Feishu] running card created: source=%s card=%s", source_message_id, running_card_id)
else:
logger.warning("[Feishu] running card creation returned no message_id for source=%s, subsequent updates will fall back to new replies", source_message_id)
return running_card_id
def _ensure_running_card_started(self, source_message_id: str, text: str = "Working on it...") -> asyncio.Task | None:
"""Start running-card creation once per source message."""
running_card_id = self._running_card_ids.get(source_message_id)
if running_card_id:
return None
running_card_task = self._running_card_tasks.get(source_message_id)
if running_card_task:
return running_card_task
running_card_task = asyncio.create_task(self._create_running_card(source_message_id, text))
self._running_card_tasks[source_message_id] = running_card_task
running_card_task.add_done_callback(lambda done_task, mid=source_message_id: self._finalize_running_card_task(mid, done_task))
return running_card_task
    def _finalize_running_card_task(self, source_message_id: str, task: asyncio.Task) -> None:
        """Drop the completed creation task from the registry and log errors.

        The identity check guards against a newer task having replaced this
        entry in the meantime.
        """
        if self._running_card_tasks.get(source_message_id) is task:
            self._running_card_tasks.pop(source_message_id, None)
        self._log_task_error(task, "create_running_card", source_message_id)
async def _ensure_running_card(self, source_message_id: str, text: str = "Working on it...") -> str | None:
"""Ensure the in-thread running card exists and track its message ID."""
running_card_id = self._running_card_ids.get(source_message_id)
if running_card_id:
return running_card_id
running_card_task = self._ensure_running_card_started(source_message_id, text)
if running_card_task is None:
return self._running_card_ids.get(source_message_id)
return await running_card_task
    async def _send_running_reply(self, message_id: str) -> None:
        """Reply to a message in-thread with a running card.

        Best-effort: any failure is logged and swallowed so inbound
        processing is never interrupted by card creation.
        """
        try:
            await self._ensure_running_card(message_id)
        except Exception:
            logger.exception("[Feishu] failed to send running reply for message %s", message_id)
    async def _send_card_message(self, msg: OutboundMessage) -> None:
        """Send or update the Feishu card tied to the current request.

        When ``msg.thread_ts`` identifies the source message, streaming
        updates are patched into the cached running card; otherwise a new
        card is created in the chat. Final messages also clear the cache
        and mark the source message with a DONE reaction.
        """
        source_message_id = msg.thread_ts
        if source_message_id:
            running_card_id = self._running_card_ids.get(source_message_id)
            awaited_running_card_task = False
            if not running_card_id:
                # Card may still be under creation — wait for that task.
                running_card_task = self._running_card_tasks.get(source_message_id)
                if running_card_task:
                    awaited_running_card_task = True
                    running_card_id = await running_card_task
            if running_card_id:
                try:
                    await self._update_card(running_card_id, msg.text)
                except Exception:
                    # Non-final updates may simply retry on the next chunk;
                    # a final message must be delivered, so fall back.
                    if not msg.is_final:
                        raise
                    logger.exception(
                        "[Feishu] failed to patch running card %s, falling back to final reply",
                        running_card_id,
                    )
                    await self._reply_card(source_message_id, msg.text)
                else:
                    logger.info("[Feishu] running card updated: source=%s card=%s", source_message_id, running_card_id)
            elif msg.is_final:
                # No card to patch — deliver the final text as a fresh reply.
                await self._reply_card(source_message_id, msg.text)
            elif awaited_running_card_task:
                logger.warning(
                    "[Feishu] running card task finished without message_id for source=%s, skipping duplicate non-final creation",
                    source_message_id,
                )
            else:
                await self._ensure_running_card(source_message_id, msg.text)
            if msg.is_final:
                # Request finished: forget the card and acknowledge the source.
                self._running_card_ids.pop(source_message_id, None)
                await self._add_reaction(source_message_id, "DONE")
            return
        await self._create_card(msg.chat_id, msg.text)
# -- internal ----------------------------------------------------------
@staticmethod
def _log_future_error(fut, name: str, msg_id: str) -> None:
"""Callback for run_coroutine_threadsafe futures to surface errors."""
try:
exc = fut.exception()
if exc:
logger.error("[Feishu] %s failed for msg_id=%s: %s", name, msg_id, exc)
except Exception:
pass
    @staticmethod
    def _log_task_error(task: asyncio.Task, name: str, msg_id: str) -> None:
        """Callback for background asyncio tasks to surface errors.

        Never raises: an exception escaping a done-callback would itself be
        reported as an unhandled event-loop error.
        """
        try:
            exc = task.exception()
            if exc:
                logger.error("[Feishu] %s failed for msg_id=%s: %s", name, msg_id, exc)
        except asyncio.CancelledError:
            # Task.exception() raises (not returns) CancelledError when the
            # task was cancelled.
            logger.info("[Feishu] %s cancelled for msg_id=%s", name, msg_id)
        except Exception:
            pass
    async def _prepare_inbound(self, msg_id: str, inbound) -> None:
        """Kick off Feishu side effects without delaying inbound dispatch.

        Fires the acknowledgement reaction and running-card creation as
        background work, then publishes the inbound message to the bus.
        """
        # Acknowledge receipt; tracked so errors are surfaced, not lost.
        reaction_task = asyncio.create_task(self._add_reaction(msg_id, "OK"))
        self._track_background_task(reaction_task, name="add_reaction", msg_id=msg_id)
        # Start the "Working on it..." card without awaiting it.
        self._ensure_running_card_started(msg_id)
        await self.bus.publish_inbound(inbound)
    def _on_message(self, event) -> None:
        """Called by lark-oapi when a message is received (runs in lark thread).

        Parses the event payload (plain text or rich text), builds an
        InboundMessage, and schedules it on the main asyncio loop via
        ``run_coroutine_threadsafe``. Never raises — all errors are logged.
        """
        try:
            logger.info("[Feishu] raw event received: type=%s", type(event).__name__)
            message = event.event.message
            chat_id = message.chat_id
            msg_id = message.message_id
            sender_id = event.event.sender.sender_id.open_id
            # root_id is set when the message is a reply within a Feishu thread.
            # Use it as topic_id so all replies share the same DeerFlow thread.
            root_id = getattr(message, "root_id", None) or None
            # Parse message content (message.content is a JSON string).
            content = json.loads(message.content)
            if "text" in content:
                # Handle plain text messages
                text = content["text"]
            elif "content" in content and isinstance(content["content"], list):
                # Handle rich-text messages with a top-level "content" list (e.g., topic groups/posts)
                text_paragraphs: list[str] = []
                for paragraph in content["content"]:
                    if isinstance(paragraph, list):
                        paragraph_text_parts: list[str] = []
                        for element in paragraph:
                            if isinstance(element, dict):
                                # Include both normal text and @ mentions
                                if element.get("tag") in ("text", "at"):
                                    text_value = element.get("text", "")
                                    if text_value:
                                        paragraph_text_parts.append(text_value)
                        if paragraph_text_parts:
                            # Join text segments within a paragraph with spaces to avoid "helloworld"
                            text_paragraphs.append(" ".join(paragraph_text_parts))
                # Join paragraphs with blank lines to preserve paragraph boundaries
                text = "\n\n".join(text_paragraphs)
            else:
                # Unsupported payload (stickers, images, ...) — treat as empty.
                text = ""
            text = text.strip()
            logger.info(
                "[Feishu] parsed message: chat_id=%s, msg_id=%s, root_id=%s, sender=%s, text=%r",
                chat_id,
                msg_id,
                root_id,
                sender_id,
                text[:100] if text else "",
            )
            if not text:
                logger.info("[Feishu] empty text, ignoring message")
                return
            # Check if it's a command
            if text.startswith("/"):
                msg_type = InboundMessageType.COMMAND
            else:
                msg_type = InboundMessageType.CHAT
            # topic_id: use root_id for replies (same topic), msg_id for new messages (new topic)
            topic_id = root_id or msg_id
            inbound = self._make_inbound(
                chat_id=chat_id,
                user_id=sender_id,
                text=text,
                msg_type=msg_type,
                thread_ts=msg_id,
                metadata={"message_id": msg_id, "root_id": root_id},
            )
            inbound.topic_id = topic_id
            # Schedule on the async event loop (we are in the lark SDK thread).
            if self._main_loop and self._main_loop.is_running():
                logger.info("[Feishu] publishing inbound message to bus (type=%s, msg_id=%s)", msg_type.value, msg_id)
                fut = asyncio.run_coroutine_threadsafe(self._prepare_inbound(msg_id, inbound), self._main_loop)
                fut.add_done_callback(lambda f, mid=msg_id: self._log_future_error(f, "prepare_inbound", mid))
            else:
                logger.warning("[Feishu] main loop not running, cannot publish inbound message")
        except Exception:
            logger.exception("[Feishu] error processing message")
================================================
FILE: backend/app/channels/manager.py
================================================
"""ChannelManager — consumes inbound messages and dispatches them to the DeerFlow agent via LangGraph Server."""
from __future__ import annotations
import asyncio
import logging
import mimetypes
import time
from collections.abc import Mapping
from typing import Any
from app.channels.message_bus import InboundMessage, InboundMessageType, MessageBus, OutboundMessage, ResolvedAttachment
from app.channels.store import ChannelStore
logger = logging.getLogger(__name__)
DEFAULT_LANGGRAPH_URL = "http://localhost:2024"
DEFAULT_GATEWAY_URL = "http://localhost:8001"
DEFAULT_ASSISTANT_ID = "lead_agent"
DEFAULT_RUN_CONFIG: dict[str, Any] = {"recursion_limit": 100}
DEFAULT_RUN_CONTEXT: dict[str, Any] = {
"thinking_enabled": True,
"is_plan_mode": False,
"subagent_enabled": False,
}
STREAM_UPDATE_MIN_INTERVAL_SECONDS = 0.35
CHANNEL_CAPABILITIES = {
"feishu": {"supports_streaming": True},
"slack": {"supports_streaming": False},
"telegram": {"supports_streaming": False},
}
def _as_dict(value: Any) -> dict[str, Any]:
return dict(value) if isinstance(value, Mapping) else {}
def _merge_dicts(*layers: Any) -> dict[str, Any]:
merged: dict[str, Any] = {}
for layer in layers:
if isinstance(layer, Mapping):
merged.update(layer)
return merged
def _extract_response_text(result: dict | list) -> str:
"""Extract the last AI message text from a LangGraph runs.wait result.
``runs.wait`` returns the final state dict which contains a ``messages``
list. Each message is a dict with at least ``type`` and ``content``.
Handles special cases:
- Regular AI text responses
- Clarification interrupts (``ask_clarification`` tool messages)
- AI messages with tool_calls but no text content
"""
if isinstance(result, list):
messages = result
elif isinstance(result, dict):
messages = result.get("messages", [])
else:
return ""
# Walk backwards to find usable response text, but stop at the last
# human message to avoid returning text from a previous turn.
for msg in reversed(messages):
if not isinstance(msg, dict):
continue
msg_type = msg.get("type")
# Stop at the last human message — anything before it is a previous turn
if msg_type == "human":
break
# Check for tool messages from ask_clarification (interrupt case)
if msg_type == "tool" and msg.get("name") == "ask_clarification":
content = msg.get("content", "")
if isinstance(content, str) and content:
return content
# Regular AI message with text content
if msg_type == "ai":
content = msg.get("content", "")
if isinstance(content, str) and content:
return content
# content can be a list of content blocks
if isinstance(content, list):
parts = []
for block in content:
if isinstance(block, dict) and block.get("type") == "text":
parts.append(block.get("text", ""))
elif isinstance(block, str):
parts.append(block)
text = "".join(parts)
if text:
return text
return ""
def _extract_text_content(content: Any) -> str:
"""Extract text from a streaming payload content field."""
if isinstance(content, str):
return content
if isinstance(content, list):
parts: list[str] = []
for block in content:
if isinstance(block, str):
parts.append(block)
elif isinstance(block, Mapping):
text = block.get("text")
if isinstance(text, str):
parts.append(text)
else:
nested = block.get("content")
if isinstance(nested, str):
parts.append(nested)
return "".join(parts)
if isinstance(content, Mapping):
for key in ("text", "content"):
value = content.get(key)
if isinstance(value, str):
return value
return ""
def _merge_stream_text(existing: str, chunk: str) -> str:
"""Merge either delta text or cumulative text into a single snapshot."""
if not chunk:
return existing
if not existing or chunk == existing:
return chunk or existing
if chunk.startswith(existing):
return chunk
if existing.endswith(chunk):
return existing
return existing + chunk
def _extract_stream_message_id(payload: Any, metadata: Any) -> str | None:
"""Best-effort extraction of the streamed AI message identifier."""
candidates = [payload, metadata]
if isinstance(payload, Mapping):
candidates.append(payload.get("kwargs"))
for candidate in candidates:
if not isinstance(candidate, Mapping):
continue
for key in ("id", "message_id"):
value = candidate.get(key)
if isinstance(value, str) and value:
return value
return None
def _accumulate_stream_text(
    buffers: dict[str, str],
    current_message_id: str | None,
    event_data: Any,
) -> tuple[str | None, str | None]:
    """Convert a ``messages-tuple`` event into the latest displayable AI text.

    Mutates *buffers* (per-message text snapshots) in place and returns
    ``(latest_text_or_None, message_id)``. Tool messages and payloads with
    no extractable text yield ``(None, current_message_id)``.
    """
    payload: Any = event_data
    metadata: Any = None
    # messages-tuple events arrive as (message, metadata) pairs.
    if isinstance(event_data, (list, tuple)) and event_data:
        payload = event_data[0]
        metadata = event_data[1] if len(event_data) > 1 else None
    if isinstance(payload, str):
        key = current_message_id or "__default__"
        buffers[key] = _merge_stream_text(buffers.get(key, ""), payload)
        return buffers[key], key
    if not isinstance(payload, Mapping):
        return None, current_message_id
    # Skip tool-call chatter; only AI text is displayable.
    if "tool" in str(payload.get("type", "")).lower():
        return None, current_message_id
    text = _extract_text_content(payload.get("content"))
    if not text:
        kwargs = payload.get("kwargs")
        if isinstance(kwargs, Mapping):
            text = _extract_text_content(kwargs.get("content"))
    if not text:
        return None, current_message_id
    key = _extract_stream_message_id(payload, metadata) or current_message_id or "__default__"
    buffers[key] = _merge_stream_text(buffers.get(key, ""), text)
    return buffers[key], key
def _extract_artifacts(result: dict | list) -> list[str]:
"""Extract artifact paths from the last AI response cycle only.
Instead of reading the full accumulated ``artifacts`` state (which contains
all artifacts ever produced in the thread), this inspects the messages after
the last human message and collects file paths from ``present_files`` tool
calls. This ensures only newly-produced artifacts are returned.
"""
if isinstance(result, list):
messages = result
elif isinstance(result, dict):
messages = result.get("messages", [])
else:
return []
artifacts: list[str] = []
for msg in reversed(messages):
if not isinstance(msg, dict):
continue
# Stop at the last human message — anything before it is a previous turn
if msg.get("type") == "human":
break
# Look for AI messages with present_files tool calls
if msg.get("type") == "ai":
for tc in msg.get("tool_calls", []):
if isinstance(tc, dict) and tc.get("name") == "present_files":
args = tc.get("args", {})
paths = args.get("filepaths", [])
if isinstance(paths, list):
artifacts.extend(p for p in paths if isinstance(p, str))
return artifacts
def _format_artifact_text(artifacts: list[str]) -> str:
"""Format artifact paths into a human-readable text block listing filenames."""
import posixpath
filenames = [posixpath.basename(p) for p in artifacts]
if len(filenames) == 1:
return f"Created File: 📎 {filenames[0]}"
return "Created Files: 📎 " + "、".join(filenames)
_OUTPUTS_VIRTUAL_PREFIX = "/mnt/user-data/outputs/"
def _resolve_attachments(thread_id: str, artifacts: list[str]) -> list[ResolvedAttachment]:
    """Resolve virtual artifact paths to host filesystem paths with metadata.

    Only paths under ``/mnt/user-data/outputs/`` are accepted; any other
    virtual path is rejected with a warning to prevent exfiltrating uploads
    or workspace files via IM channels.

    Skips artifacts that cannot be resolved (missing files, invalid paths)
    and logs warnings for them.

    Args:
        thread_id: DeerFlow thread whose sandbox the paths belong to.
        artifacts: Virtual artifact paths as emitted by the agent.

    Returns:
        ResolvedAttachment entries for every artifact that passed validation.
    """
    # Deferred import keeps this module importable without the harness package.
    from deerflow.config.paths import get_paths
    attachments: list[ResolvedAttachment] = []
    paths = get_paths()
    outputs_dir = paths.sandbox_outputs_dir(thread_id).resolve()
    for virtual_path in artifacts:
        # Security: only allow files from the agent outputs directory
        if not virtual_path.startswith(_OUTPUTS_VIRTUAL_PREFIX):
            logger.warning("[Manager] rejected non-outputs artifact path: %s", virtual_path)
            continue
        try:
            actual = paths.resolve_virtual_path(thread_id, virtual_path)
            # Verify the resolved path is actually under the outputs directory
            # (guards against path-traversal even after prefix check)
            try:
                actual.resolve().relative_to(outputs_dir)
            except ValueError:
                logger.warning("[Manager] artifact path escapes outputs dir: %s -> %s", virtual_path, actual)
                continue
            if not actual.is_file():
                logger.warning("[Manager] artifact not found on disk: %s -> %s", virtual_path, actual)
                continue
            mime, _ = mimetypes.guess_type(str(actual))
            mime = mime or "application/octet-stream"
            attachments.append(
                ResolvedAttachment(
                    virtual_path=virtual_path,
                    actual_path=actual,
                    filename=actual.name,
                    mime_type=mime,
                    size=actual.stat().st_size,
                    is_image=mime.startswith("image/"),
                )
            )
        except (ValueError, OSError) as exc:
            # Best-effort: a bad artifact must not abort the whole delivery.
            logger.warning("[Manager] failed to resolve artifact %s: %s", virtual_path, exc)
    return attachments
def _prepare_artifact_delivery(
thread_id: str,
response_text: str,
artifacts: list[str],
) -> tuple[str, list[ResolvedAttachment]]:
"""Resolve attachments and append filename fallbacks to the text response."""
attachments: list[ResolvedAttachment] = []
if not artifacts:
return response_text, attachments
attachments = _resolve_attachments(thread_id, artifacts)
resolved_virtuals = {attachment.virtual_path for attachment in attachments}
unresolved = [path for path in artifacts if path not in resolved_virtuals]
if unresolved:
artifact_text = _format_artifact_text(unresolved)
response_text = (response_text + "\n\n" + artifact_text) if response_text else artifact_text
# Always include resolved attachment filenames as a text fallback so files
# remain discoverable even when the upload is skipped or fails.
if attachments:
resolved_text = _format_artifact_text([attachment.virtual_path for attachment in attachments])
response_text = (response_text + "\n\n" + resolved_text) if response_text else resolved_text
return response_text, attachments
class ChannelManager:
"""Core dispatcher that bridges IM channels to the DeerFlow agent.
It reads from the MessageBus inbound queue, creates/reuses threads on
the LangGraph Server, sends messages via ``runs.wait``, and publishes
outbound responses back through the bus.
"""
def __init__(
self,
bus: MessageBus,
store: ChannelStore,
*,
max_concurrency: int = 5,
langgraph_url: str = DEFAULT_LANGGRAPH_URL,
gateway_url: str = DEFAULT_GATEWAY_URL,
assistant_id: str = DEFAULT_ASSISTANT_ID,
default_session: dict[str, Any] | None = None,
channel_sessions: dict[str, Any] | None = None,
) -> None:
self.bus = bus
self.store = store
self._max_concurrency = max_concurrency
self._langgraph_url = langgraph_url
self._gateway_url = gateway_url
self._assistant_id = assistant_id
self._default_session = _as_dict(default_session)
self._channel_sessions = dict(channel_sessions or {})
self._client = None # lazy init — langgraph_sdk async client
self._semaphore: asyncio.Semaphore | None = None
self._running = False
self._task: asyncio.Task | None = None
@staticmethod
def _channel_supports_streaming(channel_name: str) -> bool:
return CHANNEL_CAPABILITIES.get(channel_name, {}).get("supports_streaming", False)
def _resolve_session_layer(self, msg: InboundMessage) -> tuple[dict[str, Any], dict[str, Any]]:
channel_layer = _as_dict(self._channel_sessions.get(msg.channel_name))
users_layer = _as_dict(channel_layer.get("users"))
user_layer = _as_dict(users_layer.get(msg.user_id))
return channel_layer, user_layer
def _resolve_run_params(self, msg: InboundMessage, thread_id: str) -> tuple[str, dict[str, Any], dict[str, Any]]:
channel_layer, user_layer = self._resolve_session_layer(msg)
assistant_id = user_layer.get("assistant_id") or channel_layer.get("assistant_id") or self._default_session.get("assistant_id") or self._assistant_id
if not isinstance(assistant_id, str) or not assistant_id.strip():
assistant_id = self._assistant_id
run_config = _merge_dicts(
DEFAULT_RUN_CONFIG,
self._default_session.get("config"),
channel_layer.get("config"),
user_layer.get("config"),
)
run_context = _merge_dicts(
DEFAULT_RUN_CONTEXT,
self._default_session.get("context"),
channel_layer.get("context"),
user_layer.get("context"),
{"thread_id": thread_id},
)
return assistant_id, run_config, run_context
# -- LangGraph SDK client (lazy) ----------------------------------------
def _get_client(self):
"""Return the ``langgraph_sdk`` async client, creating it on first use."""
if self._client is None:
from langgraph_sdk import get_client
self._client = get_client(url=self._langgraph_url)
return self._client
# -- lifecycle ---------------------------------------------------------
async def start(self) -> None:
"""Start the dispatch loop."""
if self._running:
return
self._running = True
self._semaphore = asyncio.Semaphore(self._max_concurrency)
self._task = asyncio.create_task(self._dispatch_loop())
logger.info("ChannelManager started (max_concurrency=%d)", self._max_concurrency)
async def stop(self) -> None:
"""Stop the dispatch loop."""
self._running = False
if self._task:
self._task.cancel()
try:
await self._task
except asyncio.CancelledError:
pass
self._task = None
logger.info("ChannelManager stopped")
# -- dispatch loop -----------------------------------------------------
async def _dispatch_loop(self) -> None:
logger.info("[Manager] dispatch loop started, waiting for inbound messages")
while self._running:
try:
msg = await asyncio.wait_for(self.bus.get_inbound(), timeout=1.0)
except TimeoutError:
continue
except asyncio.CancelledError:
break
logger.info(
"[Manager] received inbound: channel=%s, chat_id=%s, type=%s, text=%r",
msg.channel_name,
msg.chat_id,
msg.msg_type.value,
msg.text[:100] if msg.text else "",
)
task = asyncio.create_task(self._handle_message(msg))
task.add_done_callback(self._log_task_error)
@staticmethod
def _log_task_error(task: asyncio.Task) -> None:
"""Surface unhandled exceptions from background tasks."""
if task.cancelled():
return
exc = task.exception()
if exc:
logger.error("[Manager] unhandled error in message task: %s", exc, exc_info=exc)
async def _handle_message(self, msg: InboundMessage) -> None:
async with self._semaphore:
try:
if msg.msg_type == InboundMessageType.COMMAND:
await self._handle_command(msg)
else:
await self._handle_chat(msg)
except Exception:
logger.exception(
"Error handling message from %s (chat=%s)",
msg.channel_name,
msg.chat_id,
)
await self._send_error(msg, "An internal error occurred. Please try again.")
# -- chat handling -----------------------------------------------------
async def _create_thread(self, client, msg: InboundMessage) -> str:
"""Create a new thread on the LangGraph Server and store the mapping."""
thread = await client.threads.create()
thread_id = thread["thread_id"]
self.store.set_thread_id(
msg.channel_name,
msg.chat_id,
thread_id,
topic_id=msg.topic_id,
user_id=msg.user_id,
)
logger.info("[Manager] new thread created on LangGraph Server: thread_id=%s for chat_id=%s topic_id=%s", thread_id, msg.chat_id, msg.topic_id)
return thread_id
async def _handle_chat(self, msg: InboundMessage, extra_context: dict[str, Any] | None = None) -> None:
client = self._get_client()
# Look up existing DeerFlow thread.
# topic_id may be None (e.g. Telegram private chats) — the store
# handles this by using the "channel:chat_id" key without a topic suffix.
thread_id = self.store.get_thread_id(msg.channel_name, msg.chat_id, topic_id=msg.topic_id)
if thread_id:
logger.info("[Manager] reusing thread: thread_id=%s for topic_id=%s", thread_id, msg.topic_id)
# No existing thread found — create a new one
if thread_id is None:
thread_id = await self._create_thread(client, msg)
assistant_id, run_config, run_context = self._resolve_run_params(msg, thread_id)
if extra_context:
run_context.update(extra_context)
if self._channel_supports_streaming(msg.channel_name):
await self._handle_streaming_chat(
client,
msg,
thread_id,
assistant_id,
run_config,
run_context,
)
return
logger.info("[Manager] invoking runs.wait(thread_id=%s, text=%r)", thread_id, msg.text[:100])
result = await client.runs.wait(
thread_id,
assistant_id,
input={"messages": [{"role": "human", "content": msg.text}]},
config=run_config,
context=run_context,
)
response_text = _extract_response_text(result)
artifacts = _extract_artifacts(result)
logger.info(
"[Manager] agent response received: thread_id=%s, response_len=%d, artifacts=%d",
thread_id,
len(response_text) if response_text else 0,
len(artifacts),
)
response_text, attachments = _prepare_artifact_delivery(thread_id, response_text, artifacts)
if not response_text:
if attachments:
response_text = _format_artifact_text([a.virtual_path for a in attachments])
else:
response_text = "(No response from agent)"
outbound = OutboundMessage(
channel_name=msg.channel_name,
chat_id=msg.chat_id,
thread_id=thread_id,
text=response_text,
artifacts=artifacts,
attachments=attachments,
thread_ts=msg.thread_ts,
)
logger.info("[Manager] publishing outbound message to bus: channel=%s, chat_id=%s", msg.channel_name, msg.chat_id)
await self.bus.publish_outbound(outbound)
async def _handle_streaming_chat(
self,
client,
msg: InboundMessage,
thread_id: str,
assistant_id: str,
run_config: dict[str, Any],
run_context: dict[str, Any],
) -> None:
logger.info("[Manager] invoking runs.stream(thread_id=%s, text=%r)", thread_id, msg.text[:100])
last_values: dict[str, Any] | list | None = None
streamed_buffers: dict[str, str] = {}
current_message_id: str | None = None
latest_text = ""
last_published_text = ""
last_publish_at = 0.0
stream_error: BaseException | None = None
try:
async for chunk in client.runs.stream(
thread_id,
assistant_id,
input={"messages": [{"role": "human", "content": msg.text}]},
config=run_config,
context=run_context,
stream_mode=["messages-tuple", "values"],
):
event = getattr(chunk, "event", "")
data = getattr(chunk, "data", None)
if event == "messages-tuple":
accumulated_text, current_message_id = _accumulate_stream_text(streamed_buffers, current_message_id, data)
if accumulated_text:
latest_text = accumulated_text
elif event == "values" and isinstance(data, (dict, list)):
last_values = data
snapshot_text = _extract_response_text(data)
if snapshot_text:
latest_text = snapshot_text
if not latest_text or latest_text == last_published_text:
continue
now = time.monotonic()
if last_published_text and now - last_publish_at < STREAM_UPDATE_MIN_INTERVAL_SECONDS:
continue
await self.bus.publish_outbound(
OutboundMessage(
channel_name=msg.channel_name,
chat_id=msg.chat_id,
thread_id=thread_id,
text=latest_text,
is_final=False,
thread_ts=msg.thread_ts,
)
)
last_published_text = latest_text
last_publish_at = now
except Exception as exc:
stream_error = exc
logger.exception("[Manager] streaming error: thread_id=%s", thread_id)
finally:
result = last_values if last_values is not None else {"messages": [{"type": "ai", "content": latest_text}]}
response_text = _extract_response_text(result)
artifacts = _extract_artifacts(result)
response_text, attachments = _prepare_artifact_delivery(thread_id, response_text, artifacts)
if not response_text:
if attachments:
response_text = _format_artifact_text([attachment.virtual_path for attachment in attachments])
elif stream_error:
response_text = "An error occurred while processing your request. Please try again."
else:
response_text = latest_text or "(No response from agent)"
logger.info(
"[Manager] streaming response completed: thread_id=%s, response_len=%d, artifacts=%d, error=%s",
thread_id,
len(response_text),
len(artifacts),
stream_error,
)
await self.bus.publish_outbound(
OutboundMessage(
channel_name=msg.channel_name,
chat_id=msg.chat_id,
thread_id=thread_id,
text=response_text,
artifacts=artifacts,
attachments=attachments,
is_final=True,
thread_ts=msg.thread_ts,
)
)
# -- command handling --------------------------------------------------
async def _handle_command(self, msg: InboundMessage) -> None:
text = msg.text.strip()
parts = text.split(maxsplit=1)
command = parts[0].lower().lstrip("/")
if command == "bootstrap":
from dataclasses import replace as _dc_replace
chat_text = parts[1] if len(parts) > 1 else "Initialize workspace"
chat_msg = _dc_replace(msg, text=chat_text, msg_type=InboundMessageType.CHAT)
await self._handle_chat(chat_msg, extra_context={"is_bootstrap": True})
return
if command == "new":
# Create a new thread on the LangGraph Server
client = self._get_client()
thread = await client.threads.create()
new_thread_id = thread["thread_id"]
self.store.set_thread_id(
msg.channel_name,
msg.chat_id,
new_thread_id,
topic_id=msg.topic_id,
user_id=msg.user_id,
)
reply = "New conversation started."
elif command == "status":
thread_id = self.store.get_thread_id(msg.channel_name, msg.chat_id, topic_id=msg.topic_id)
reply = f"Active thread: {thread_id}" if thread_id else "No active conversation."
elif command == "models":
reply = await self._fetch_gateway("/api/models", "models")
elif command == "memory":
reply = await self._fetch_gateway("/api/memory", "memory")
elif command == "help":
reply = (
"Available commands:\n"
"/bootstrap — Start a bootstrap session (enables agent setup)\n"
"/new — Start a new conversation\n"
"/status — Show current thread info\n"
"/models — List available models\n"
"/memory — Show memory status\n"
"/help — Show this help"
)
else:
reply = f"Unknown command: /{command}. Type /help for available commands."
outbound = OutboundMessage(
channel_name=msg.channel_name,
chat_id=msg.chat_id,
thread_id=self.store.get_thread_id(msg.channel_name, msg.chat_id) or "",
text=reply,
thread_ts=msg.thread_ts,
)
await self.bus.publish_outbound(outbound)
async def _fetch_gateway(self, path: str, kind: str) -> str:
    """Query the Gateway API and render a human-readable command reply.

    Args:
        path: Gateway API path (e.g. ``/api/models``).
        kind: Kind of data being fetched ("models" or "memory"); selects
            the formatting of the reply text.

    Returns:
        A formatted reply string, or an error message if the request failed.
    """
    import httpx

    try:
        async with httpx.AsyncClient() as http:
            response = await http.get(f"{self._gateway_url}{path}", timeout=10)
            response.raise_for_status()
            payload = response.json()
    except Exception:
        # Best-effort: report failure to the user instead of raising.
        logger.exception("Failed to fetch %s from gateway", kind)
        return f"Failed to fetch {kind} information."
    if kind == "models":
        model_names = [entry["name"] for entry in payload.get("models", [])]
        if not model_names:
            return "No models configured."
        listing = "\n".join(f"• {name}" for name in model_names)
        return "Available models:\n" + listing
    if kind == "memory":
        return f"Memory contains {len(payload.get('facts', []))} fact(s)."
    # Unknown kind: fall back to the raw payload representation.
    return str(payload)
# -- error helper ------------------------------------------------------
async def _send_error(self, msg: InboundMessage, error_text: str) -> None:
    """Publish an error reply back to the channel that sent *msg*.

    Args:
        msg: The inbound message that triggered the error.
        error_text: Human-readable error description to deliver.
    """
    # Pass topic_id so topic-scoped chats resolve their own thread mapping,
    # consistent with how thread ids are stored via set_thread_id
    # (previously omitted, yielding an empty/wrong thread_id for topics).
    thread_id = self.store.get_thread_id(msg.channel_name, msg.chat_id, topic_id=msg.topic_id) or ""
    outbound = OutboundMessage(
        channel_name=msg.channel_name,
        chat_id=msg.chat_id,
        thread_id=thread_id,
        text=error_text,
        thread_ts=msg.thread_ts,
    )
    await self.bus.publish_outbound(outbound)
================================================
FILE: backend/app/channels/message_bus.py
================================================
"""MessageBus — async pub/sub hub that decouples channels from the agent dispatcher."""
from __future__ import annotations
import asyncio
import logging
import time
from collections.abc import Callable, Coroutine
from dataclasses import dataclass, field
from enum import StrEnum
from pathlib import Path
from typing import Any
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
# Message types
# ---------------------------------------------------------------------------
class InboundMessageType(StrEnum):
    """Types of messages arriving from IM channels."""
    # Regular conversational message, routed to the agent dispatcher.
    CHAT = "chat"
    # Slash command (e.g. /new, /help), handled by the dispatcher itself.
    COMMAND = "command"
@dataclass
class InboundMessage:
    """A message arriving from an IM channel toward the agent dispatcher.

    Attributes:
        channel_name: Name of the source channel (e.g. "feishu", "slack").
        chat_id: Platform-specific chat/conversation identifier.
        user_id: Platform-specific user identifier.
        text: The message text.
        msg_type: Whether this is a regular chat message or a command.
        thread_ts: Optional platform thread identifier (for threaded replies).
        topic_id: Conversation topic identifier used to map to a DeerFlow thread.
            Messages sharing the same ``topic_id`` within a ``chat_id`` will
            reuse the same DeerFlow thread. When ``None``, each message
            creates a new thread (one-shot Q&A).
        files: Optional list of file attachments (platform-specific dicts).
        metadata: Arbitrary extra data from the channel.
        created_at: Unix timestamp when the message was created.
    """

    channel_name: str
    chat_id: str
    user_id: str
    text: str
    msg_type: InboundMessageType = InboundMessageType.CHAT
    thread_ts: str | None = None
    topic_id: str | None = None
    # default_factory keeps mutable defaults per-instance (never shared).
    files: list[dict[str, Any]] = field(default_factory=list)
    metadata: dict[str, Any] = field(default_factory=dict)
    # Stamped at construction time (Unix seconds).
    created_at: float = field(default_factory=time.time)
@dataclass
class ResolvedAttachment:
"""A file attachment resolved to a host filesystem path, ready for upload.
Attributes:
virtual_path: Original virtual path (e.g. /mnt/user-data/outputs/report.pdf).
actual_path: Resolved host filesystem path.
filename: Basename of the file.
mime_type: MIME type (e.g. "application/pdf").
size: File size in bytes.
is_image: True for image/* MIME types
gitextract_d9b2_eay/
├── .dockerignore
├── .gitattributes
├── .github/
│ ├── ISSUE_TEMPLATE/
│ │ └── runtime-information.yml
│ ├── copilot-instructions.md
│ └── workflows/
│ └── backend-unit-tests.yml
├── .gitignore
├── CONTRIBUTING.md
├── LICENSE
├── Makefile
├── README.md
├── README_ja.md
├── README_zh.md
├── SECURITY.md
├── backend/
│ ├── .gitignore
│ ├── .python-version
│ ├── AGENTS.md
│ ├── CLAUDE.md
│ ├── CONTRIBUTING.md
│ ├── Dockerfile
│ ├── Makefile
│ ├── README.md
│ ├── app/
│ │ ├── __init__.py
│ │ ├── channels/
│ │ │ ├── __init__.py
│ │ │ ├── base.py
│ │ │ ├── feishu.py
│ │ │ ├── manager.py
│ │ │ ├── message_bus.py
│ │ │ ├── service.py
│ │ │ ├── slack.py
│ │ │ ├── store.py
│ │ │ └── telegram.py
│ │ └── gateway/
│ │ ├── __init__.py
│ │ ├── app.py
│ │ ├── config.py
│ │ ├── path_utils.py
│ │ └── routers/
│ │ ├── __init__.py
│ │ ├── agents.py
│ │ ├── artifacts.py
│ │ ├── channels.py
│ │ ├── mcp.py
│ │ ├── memory.py
│ │ ├── models.py
│ │ ├── skills.py
│ │ ├── suggestions.py
│ │ └── uploads.py
│ ├── debug.py
│ ├── docs/
│ │ ├── API.md
│ │ ├── APPLE_CONTAINER.md
│ │ ├── ARCHITECTURE.md
│ │ ├── AUTO_TITLE_GENERATION.md
│ │ ├── CONFIGURATION.md
│ │ ├── FILE_UPLOAD.md
│ │ ├── HARNESS_APP_SPLIT.md
│ │ ├── MCP_SERVER.md
│ │ ├── MEMORY_IMPROVEMENTS.md
│ │ ├── MEMORY_IMPROVEMENTS_SUMMARY.md
│ │ ├── PATH_EXAMPLES.md
│ │ ├── README.md
│ │ ├── SETUP.md
│ │ ├── TITLE_GENERATION_IMPLEMENTATION.md
│ │ ├── TODO.md
│ │ ├── plan_mode_usage.md
│ │ ├── summarization.md
│ │ └── task_tool_improvements.md
│ ├── langgraph.json
│ ├── packages/
│ │ └── harness/
│ │ ├── deerflow/
│ │ │ ├── __init__.py
│ │ │ ├── agents/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── checkpointer/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── async_provider.py
│ │ │ │ │ └── provider.py
│ │ │ │ ├── lead_agent/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── agent.py
│ │ │ │ │ └── prompt.py
│ │ │ │ ├── memory/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── prompt.py
│ │ │ │ │ ├── queue.py
│ │ │ │ │ └── updater.py
│ │ │ │ ├── middlewares/
│ │ │ │ │ ├── clarification_middleware.py
│ │ │ │ │ ├── dangling_tool_call_middleware.py
│ │ │ │ │ ├── deferred_tool_filter_middleware.py
│ │ │ │ │ ├── loop_detection_middleware.py
│ │ │ │ │ ├── memory_middleware.py
│ │ │ │ │ ├── subagent_limit_middleware.py
│ │ │ │ │ ├── thread_data_middleware.py
│ │ │ │ │ ├── title_middleware.py
│ │ │ │ │ ├── todo_middleware.py
│ │ │ │ │ ├── tool_error_handling_middleware.py
│ │ │ │ │ ├── uploads_middleware.py
│ │ │ │ │ └── view_image_middleware.py
│ │ │ │ └── thread_state.py
│ │ │ ├── client.py
│ │ │ ├── community/
│ │ │ │ ├── aio_sandbox/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── aio_sandbox.py
│ │ │ │ │ ├── aio_sandbox_provider.py
│ │ │ │ │ ├── backend.py
│ │ │ │ │ ├── local_backend.py
│ │ │ │ │ ├── remote_backend.py
│ │ │ │ │ └── sandbox_info.py
│ │ │ │ ├── firecrawl/
│ │ │ │ │ └── tools.py
│ │ │ │ ├── image_search/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── tools.py
│ │ │ │ ├── infoquest/
│ │ │ │ │ ├── infoquest_client.py
│ │ │ │ │ └── tools.py
│ │ │ │ ├── jina_ai/
│ │ │ │ │ ├── jina_client.py
│ │ │ │ │ └── tools.py
│ │ │ │ └── tavily/
│ │ │ │ └── tools.py
│ │ │ ├── config/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── agents_config.py
│ │ │ │ ├── app_config.py
│ │ │ │ ├── checkpointer_config.py
│ │ │ │ ├── extensions_config.py
│ │ │ │ ├── memory_config.py
│ │ │ │ ├── model_config.py
│ │ │ │ ├── paths.py
│ │ │ │ ├── sandbox_config.py
│ │ │ │ ├── skills_config.py
│ │ │ │ ├── subagents_config.py
│ │ │ │ ├── summarization_config.py
│ │ │ │ ├── title_config.py
│ │ │ │ ├── tool_config.py
│ │ │ │ ├── tool_search_config.py
│ │ │ │ └── tracing_config.py
│ │ │ ├── mcp/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── cache.py
│ │ │ │ ├── client.py
│ │ │ │ ├── oauth.py
│ │ │ │ └── tools.py
│ │ │ ├── models/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── claude_provider.py
│ │ │ │ ├── credential_loader.py
│ │ │ │ ├── factory.py
│ │ │ │ ├── openai_codex_provider.py
│ │ │ │ ├── patched_deepseek.py
│ │ │ │ └── patched_minimax.py
│ │ │ ├── reflection/
│ │ │ │ ├── __init__.py
│ │ │ │ └── resolvers.py
│ │ │ ├── sandbox/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── exceptions.py
│ │ │ │ ├── local/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── list_dir.py
│ │ │ │ │ ├── local_sandbox.py
│ │ │ │ │ └── local_sandbox_provider.py
│ │ │ │ ├── middleware.py
│ │ │ │ ├── sandbox.py
│ │ │ │ ├── sandbox_provider.py
│ │ │ │ └── tools.py
│ │ │ ├── skills/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── loader.py
│ │ │ │ ├── parser.py
│ │ │ │ ├── types.py
│ │ │ │ └── validation.py
│ │ │ ├── subagents/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── builtins/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── bash_agent.py
│ │ │ │ │ └── general_purpose.py
│ │ │ │ ├── config.py
│ │ │ │ ├── executor.py
│ │ │ │ └── registry.py
│ │ │ ├── tools/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── builtins/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── clarification_tool.py
│ │ │ │ │ ├── present_file_tool.py
│ │ │ │ │ ├── setup_agent_tool.py
│ │ │ │ │ ├── task_tool.py
│ │ │ │ │ ├── tool_search.py
│ │ │ │ │ └── view_image_tool.py
│ │ │ │ └── tools.py
│ │ │ └── utils/
│ │ │ ├── file_conversion.py
│ │ │ ├── network.py
│ │ │ └── readability.py
│ │ └── pyproject.toml
│ ├── pyproject.toml
│ ├── ruff.toml
│ └── tests/
│ ├── conftest.py
│ ├── test_app_config_reload.py
│ ├── test_artifacts_router.py
│ ├── test_channel_file_attachments.py
│ ├── test_channels.py
│ ├── test_checkpointer.py
│ ├── test_checkpointer_none_fix.py
│ ├── test_cli_auth_providers.py
│ ├── test_client.py
│ ├── test_client_live.py
│ ├── test_config_version.py
│ ├── test_credential_loader.py
│ ├── test_custom_agent.py
│ ├── test_docker_sandbox_mode_detection.py
│ ├── test_feishu_parser.py
│ ├── test_harness_boundary.py
│ ├── test_infoquest_client.py
│ ├── test_lead_agent_model_resolution.py
│ ├── test_local_sandbox_encoding.py
│ ├── test_loop_detection_middleware.py
│ ├── test_mcp_client_config.py
│ ├── test_mcp_oauth.py
│ ├── test_memory_prompt_injection.py
│ ├── test_memory_updater.py
│ ├── test_memory_upload_filtering.py
│ ├── test_model_config.py
│ ├── test_model_factory.py
│ ├── test_patched_minimax.py
│ ├── test_present_file_tool_core_logic.py
│ ├── test_provisioner_kubeconfig.py
│ ├── test_readability.py
│ ├── test_reflection_resolvers.py
│ ├── test_sandbox_tools_security.py
│ ├── test_serialize_message_content.py
│ ├── test_skills_archive_root.py
│ ├── test_skills_loader.py
│ ├── test_skills_router.py
│ ├── test_subagent_executor.py
│ ├── test_subagent_timeout_config.py
│ ├── test_suggestions_router.py
│ ├── test_task_tool_core_logic.py
│ ├── test_thread_data_middleware.py
│ ├── test_title_generation.py
│ ├── test_title_middleware_core_logic.py
│ ├── test_token_usage.py
│ ├── test_tool_error_handling_middleware.py
│ ├── test_tool_search.py
│ ├── test_tracing_config.py
│ ├── test_uploads_middleware_core_logic.py
│ └── test_uploads_router.py
├── config.example.yaml
├── deer-flow.code-workspace
├── docker/
│ ├── docker-compose-dev.yaml
│ ├── docker-compose.yaml
│ ├── nginx/
│ │ ├── nginx.conf
│ │ └── nginx.local.conf
│ └── provisioner/
│ ├── Dockerfile
│ ├── README.md
│ └── app.py
├── docs/
│ ├── CODE_CHANGE_SUMMARY_BY_FILE.md
│ └── SKILL_NAME_CONFLICT_FIX.md
├── extensions_config.example.json
├── frontend/
│ ├── .gitignore
│ ├── .npmrc
│ ├── AGENTS.md
│ ├── CLAUDE.md
│ ├── Dockerfile
│ ├── Makefile
│ ├── README.md
│ ├── components.json
│ ├── eslint.config.js
│ ├── next.config.js
│ ├── package.json
│ ├── pnpm-workspace.yaml
│ ├── postcss.config.js
│ ├── prettier.config.js
│ ├── public/
│ │ └── demo/
│ │ └── threads/
│ │ ├── 21cfea46-34bd-4aa6-9e1f-3009452fbeb9/
│ │ │ └── thread.json
│ │ ├── 3823e443-4e2b-4679-b496-a9506eae462b/
│ │ │ ├── thread.json
│ │ │ └── user-data/
│ │ │ └── outputs/
│ │ │ └── fei-fei-li-podcast-timeline.md
│ │ ├── 4f3e55ee-f853-43db-bfb3-7d1a411f03cb/
│ │ │ └── thread.json
│ │ ├── 5aa47db1-d0cb-4eb9-aea5-3dac1b371c5a/
│ │ │ ├── thread.json
│ │ │ └── user-data/
│ │ │ └── outputs/
│ │ │ └── jiangsu-football/
│ │ │ ├── css/
│ │ │ │ └── style.css
│ │ │ ├── favicon.html
│ │ │ ├── index.html
│ │ │ └── js/
│ │ │ ├── data.js
│ │ │ └── main.js
│ │ ├── 7cfa5f8f-a2f8-47ad-acbd-da7137baf990/
│ │ │ ├── thread.json
│ │ │ └── user-data/
│ │ │ └── outputs/
│ │ │ ├── index.html
│ │ │ ├── script.js
│ │ │ └── style.css
│ │ ├── 7f9dc56c-e49c-4671-a3d2-c492ff4dce0c/
│ │ │ ├── thread.json
│ │ │ └── user-data/
│ │ │ └── outputs/
│ │ │ └── leica-master-photography-article.md
│ │ ├── 90040b36-7eba-4b97-ba89-02c3ad47a8b9/
│ │ │ └── thread.json
│ │ ├── ad76c455-5bf9-4335-8517-fc03834ab828/
│ │ │ ├── thread.json
│ │ │ └── user-data/
│ │ │ ├── outputs/
│ │ │ │ └── titanic_summary.txt
│ │ │ └── uploads/
│ │ │ └── titanic.csv
│ │ ├── b83fbb2a-4e36-4d82-9de0-7b2a02c2092a/
│ │ │ ├── thread.json
│ │ │ └── user-data/
│ │ │ └── outputs/
│ │ │ └── index.html
│ │ ├── c02bb4d5-4202-490e-ae8f-ff4864fc0d2e/
│ │ │ ├── thread.json
│ │ │ └── user-data/
│ │ │ └── outputs/
│ │ │ ├── index.html
│ │ │ ├── script.js
│ │ │ └── styles.css
│ │ ├── d3e5adaf-084c-4dd5-9d29-94f1d6bccd98/
│ │ │ ├── thread.json
│ │ │ └── user-data/
│ │ │ └── outputs/
│ │ │ └── diana_hu_research.md
│ │ ├── f4125791-0128-402a-8ca9-50e0947557e4/
│ │ │ ├── thread.json
│ │ │ └── user-data/
│ │ │ └── outputs/
│ │ │ └── index.html
│ │ └── fe3f7974-1bcb-4a01-a950-79673baafefd/
│ │ ├── thread.json
│ │ └── user-data/
│ │ └── outputs/
│ │ ├── index.html
│ │ └── research_deerflow_20260201.md
│ ├── scripts/
│ │ └── save-demo.js
│ ├── src/
│ │ ├── app/
│ │ │ ├── api/
│ │ │ │ └── auth/
│ │ │ │ └── [...all]/
│ │ │ │ └── route.ts
│ │ │ ├── layout.tsx
│ │ │ ├── mock/
│ │ │ │ └── api/
│ │ │ │ ├── mcp/
│ │ │ │ │ └── config/
│ │ │ │ │ └── route.ts
│ │ │ │ ├── models/
│ │ │ │ │ └── route.ts
│ │ │ │ ├── skills/
│ │ │ │ │ └── route.ts
│ │ │ │ └── threads/
│ │ │ │ ├── [thread_id]/
│ │ │ │ │ ├── artifacts/
│ │ │ │ │ │ └── [[...artifact_path]]/
│ │ │ │ │ │ └── route.ts
│ │ │ │ │ └── history/
│ │ │ │ │ └── route.ts
│ │ │ │ └── search/
│ │ │ │ └── route.ts
│ │ │ ├── page.tsx
│ │ │ └── workspace/
│ │ │ ├── agents/
│ │ │ │ ├── [agent_name]/
│ │ │ │ │ └── chats/
│ │ │ │ │ └── [thread_id]/
│ │ │ │ │ ├── layout.tsx
│ │ │ │ │ └── page.tsx
│ │ │ │ ├── new/
│ │ │ │ │ └── page.tsx
│ │ │ │ └── page.tsx
│ │ │ ├── chats/
│ │ │ │ ├── [thread_id]/
│ │ │ │ │ ├── layout.tsx
│ │ │ │ │ └── page.tsx
│ │ │ │ └── page.tsx
│ │ │ ├── layout.tsx
│ │ │ └── page.tsx
│ │ ├── components/
│ │ │ ├── ai-elements/
│ │ │ │ ├── artifact.tsx
│ │ │ │ ├── canvas.tsx
│ │ │ │ ├── chain-of-thought.tsx
│ │ │ │ ├── checkpoint.tsx
│ │ │ │ ├── code-block.tsx
│ │ │ │ ├── connection.tsx
│ │ │ │ ├── context.tsx
│ │ │ │ ├── controls.tsx
│ │ │ │ ├── conversation.tsx
│ │ │ │ ├── edge.tsx
│ │ │ │ ├── image.tsx
│ │ │ │ ├── loader.tsx
│ │ │ │ ├── message.tsx
│ │ │ │ ├── model-selector.tsx
│ │ │ │ ├── node.tsx
│ │ │ │ ├── open-in-chat.tsx
│ │ │ │ ├── panel.tsx
│ │ │ │ ├── plan.tsx
│ │ │ │ ├── prompt-input.tsx
│ │ │ │ ├── queue.tsx
│ │ │ │ ├── reasoning.tsx
│ │ │ │ ├── shimmer.tsx
│ │ │ │ ├── sources.tsx
│ │ │ │ ├── suggestion.tsx
│ │ │ │ ├── task.tsx
│ │ │ │ ├── toolbar.tsx
│ │ │ │ └── web-preview.tsx
│ │ │ ├── landing/
│ │ │ │ ├── footer.tsx
│ │ │ │ ├── header.tsx
│ │ │ │ ├── hero.tsx
│ │ │ │ ├── progressive-skills-animation.tsx
│ │ │ │ ├── section.tsx
│ │ │ │ └── sections/
│ │ │ │ ├── case-study-section.tsx
│ │ │ │ ├── community-section.tsx
│ │ │ │ ├── sandbox-section.tsx
│ │ │ │ ├── skills-section.tsx
│ │ │ │ └── whats-new-section.tsx
│ │ │ ├── theme-provider.tsx
│ │ │ ├── ui/
│ │ │ │ ├── alert.tsx
│ │ │ │ ├── aurora-text.tsx
│ │ │ │ ├── avatar.tsx
│ │ │ │ ├── badge.tsx
│ │ │ │ ├── breadcrumb.tsx
│ │ │ │ ├── button-group.tsx
│ │ │ │ ├── button.tsx
│ │ │ │ ├── card.tsx
│ │ │ │ ├── carousel.tsx
│ │ │ │ ├── collapsible.tsx
│ │ │ │ ├── command.tsx
│ │ │ │ ├── confetti-button.tsx
│ │ │ │ ├── dialog.tsx
│ │ │ │ ├── dropdown-menu.tsx
│ │ │ │ ├── empty.tsx
│ │ │ │ ├── flickering-grid.tsx
│ │ │ │ ├── galaxy.css
│ │ │ │ ├── galaxy.jsx
│ │ │ │ ├── hover-card.tsx
│ │ │ │ ├── input-group.tsx
│ │ │ │ ├── input.tsx
│ │ │ │ ├── item.tsx
│ │ │ │ ├── magic-bento.css
│ │ │ │ ├── magic-bento.tsx
│ │ │ │ ├── number-ticker.tsx
│ │ │ │ ├── progress.tsx
│ │ │ │ ├── resizable.tsx
│ │ │ │ ├── scroll-area.tsx
│ │ │ │ ├── select.tsx
│ │ │ │ ├── separator.tsx
│ │ │ │ ├── sheet.tsx
│ │ │ │ ├── shine-border.tsx
│ │ │ │ ├── sidebar.tsx
│ │ │ │ ├── skeleton.tsx
│ │ │ │ ├── sonner.tsx
│ │ │ │ ├── spotlight-card.css
│ │ │ │ ├── spotlight-card.tsx
│ │ │ │ ├── switch.tsx
│ │ │ │ ├── tabs.tsx
│ │ │ │ ├── terminal.tsx
│ │ │ │ ├── textarea.tsx
│ │ │ │ ├── toggle-group.tsx
│ │ │ │ ├── toggle.tsx
│ │ │ │ ├── tooltip.tsx
│ │ │ │ └── word-rotate.tsx
│ │ │ └── workspace/
│ │ │ ├── agent-welcome.tsx
│ │ │ ├── agents/
│ │ │ │ ├── agent-card.tsx
│ │ │ │ └── agent-gallery.tsx
│ │ │ ├── artifacts/
│ │ │ │ ├── artifact-file-detail.tsx
│ │ │ │ ├── artifact-file-list.tsx
│ │ │ │ ├── artifact-trigger.tsx
│ │ │ │ ├── context.tsx
│ │ │ │ └── index.ts
│ │ │ ├── chats/
│ │ │ │ ├── chat-box.tsx
│ │ │ │ ├── index.ts
│ │ │ │ ├── use-chat-mode.ts
│ │ │ │ └── use-thread-chat.ts
│ │ │ ├── citations/
│ │ │ │ ├── artifact-link.tsx
│ │ │ │ └── citation-link.tsx
│ │ │ ├── code-editor.tsx
│ │ │ ├── copy-button.tsx
│ │ │ ├── export-trigger.tsx
│ │ │ ├── flip-display.tsx
│ │ │ ├── github-icon.tsx
│ │ │ ├── input-box.tsx
│ │ │ ├── messages/
│ │ │ │ ├── context.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── markdown-content.tsx
│ │ │ │ ├── message-group.tsx
│ │ │ │ ├── message-list-item.tsx
│ │ │ │ ├── message-list.tsx
│ │ │ │ ├── skeleton.tsx
│ │ │ │ └── subtask-card.tsx
│ │ │ ├── mode-hover-guide.tsx
│ │ │ ├── overscroll.tsx
│ │ │ ├── recent-chat-list.tsx
│ │ │ ├── settings/
│ │ │ │ ├── about-content.ts
│ │ │ │ ├── about-settings-page.tsx
│ │ │ │ ├── about.md
│ │ │ │ ├── appearance-settings-page.tsx
│ │ │ │ ├── index.ts
│ │ │ │ ├── memory-settings-page.tsx
│ │ │ │ ├── notification-settings-page.tsx
│ │ │ │ ├── settings-dialog.tsx
│ │ │ │ ├── settings-section.tsx
│ │ │ │ ├── skill-settings-page.tsx
│ │ │ │ └── tool-settings-page.tsx
│ │ │ ├── streaming-indicator.tsx
│ │ │ ├── thread-title.tsx
│ │ │ ├── todo-list.tsx
│ │ │ ├── tooltip.tsx
│ │ │ ├── welcome.tsx
│ │ │ ├── workspace-container.tsx
│ │ │ ├── workspace-header.tsx
│ │ │ ├── workspace-nav-chat-list.tsx
│ │ │ ├── workspace-nav-menu.tsx
│ │ │ └── workspace-sidebar.tsx
│ │ ├── core/
│ │ │ ├── agents/
│ │ │ │ ├── api.ts
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── types.ts
│ │ │ ├── api/
│ │ │ │ ├── api-client.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── stream-mode.test.ts
│ │ │ │ └── stream-mode.ts
│ │ │ ├── artifacts/
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── loader.ts
│ │ │ │ └── utils.ts
│ │ │ ├── config/
│ │ │ │ └── index.ts
│ │ │ ├── i18n/
│ │ │ │ ├── context.tsx
│ │ │ │ ├── cookies.ts
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── locale.ts
│ │ │ │ ├── locales/
│ │ │ │ │ ├── en-US.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ ├── types.ts
│ │ │ │ │ └── zh-CN.ts
│ │ │ │ └── server.ts
│ │ │ ├── mcp/
│ │ │ │ ├── api.ts
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── types.ts
│ │ │ ├── memory/
│ │ │ │ ├── api.ts
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── types.ts
│ │ │ ├── messages/
│ │ │ │ └── utils.ts
│ │ │ ├── models/
│ │ │ │ ├── api.ts
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── types.ts
│ │ │ ├── notification/
│ │ │ │ └── hooks.ts
│ │ │ ├── rehype/
│ │ │ │ └── index.ts
│ │ │ ├── settings/
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── local.ts
│ │ │ ├── skills/
│ │ │ │ ├── api.ts
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── type.ts
│ │ │ ├── streamdown/
│ │ │ │ ├── index.ts
│ │ │ │ └── plugins.ts
│ │ │ ├── tasks/
│ │ │ │ ├── context.tsx
│ │ │ │ ├── index.ts
│ │ │ │ └── types.ts
│ │ │ ├── threads/
│ │ │ │ ├── export.ts
│ │ │ │ ├── hooks.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── types.ts
│ │ │ │ └── utils.ts
│ │ │ ├── todos/
│ │ │ │ ├── index.ts
│ │ │ │ └── types.ts
│ │ │ ├── tools/
│ │ │ │ └── utils.ts
│ │ │ ├── uploads/
│ │ │ │ ├── api.ts
│ │ │ │ ├── hooks.ts
│ │ │ │ └── index.ts
│ │ │ └── utils/
│ │ │ ├── datetime.ts
│ │ │ ├── files.tsx
│ │ │ ├── json.ts
│ │ │ ├── markdown.ts
│ │ │ └── uuid.ts
│ │ ├── env.js
│ │ ├── hooks/
│ │ │ └── use-mobile.ts
│ │ ├── lib/
│ │ │ └── utils.ts
│ │ ├── server/
│ │ │ └── better-auth/
│ │ │ ├── client.ts
│ │ │ ├── config.ts
│ │ │ ├── index.ts
│ │ │ └── server.ts
│ │ ├── styles/
│ │ │ └── globals.css
│ │ └── typings/
│ │ └── md.d.ts
│ └── tsconfig.json
├── scripts/
│ ├── check.py
│ ├── check.sh
│ ├── cleanup-containers.sh
│ ├── config-upgrade.sh
│ ├── configure.py
│ ├── deploy.sh
│ ├── docker.sh
│ ├── export_claude_code_oauth.py
│ ├── serve.sh
│ ├── start-daemon.sh
│ ├── tool-error-degradation-detection.sh
│ └── wait-for-port.sh
└── skills/
└── public/
├── bootstrap/
│ ├── SKILL.md
│ ├── references/
│ │ └── conversation-guide.md
│ └── templates/
│ └── SOUL.template.md
├── chart-visualization/
│ ├── SKILL.md
│ ├── references/
│ │ ├── generate_area_chart.md
│ │ ├── generate_bar_chart.md
│ │ ├── generate_boxplot_chart.md
│ │ ├── generate_column_chart.md
│ │ ├── generate_district_map.md
│ │ ├── generate_dual_axes_chart.md
│ │ ├── generate_fishbone_diagram.md
│ │ ├── generate_flow_diagram.md
│ │ ├── generate_funnel_chart.md
│ │ ├── generate_histogram_chart.md
│ │ ├── generate_line_chart.md
│ │ ├── generate_liquid_chart.md
│ │ ├── generate_mind_map.md
│ │ ├── generate_network_graph.md
│ │ ├── generate_organization_chart.md
│ │ ├── generate_path_map.md
│ │ ├── generate_pie_chart.md
│ │ ├── generate_pin_map.md
│ │ ├── generate_radar_chart.md
│ │ ├── generate_sankey_chart.md
│ │ ├── generate_scatter_chart.md
│ │ ├── generate_spreadsheet.md
│ │ ├── generate_treemap_chart.md
│ │ ├── generate_venn_chart.md
│ │ ├── generate_violin_chart.md
│ │ └── generate_word_cloud_chart.md
│ └── scripts/
│ └── generate.js
├── claude-to-deerflow/
│ ├── SKILL.md
│ └── scripts/
│ ├── chat.sh
│ └── status.sh
├── consulting-analysis/
│ └── SKILL.md
├── data-analysis/
│ ├── SKILL.md
│ └── scripts/
│ └── analyze.py
├── deep-research/
│ └── SKILL.md
├── find-skills/
│ ├── SKILL.md
│ └── scripts/
│ └── install-skill.sh
├── frontend-design/
│ ├── LICENSE.txt
│ └── SKILL.md
├── github-deep-research/
│ ├── SKILL.md
│ ├── assets/
│ │ └── report_template.md
│ └── scripts/
│ └── github_api.py
├── image-generation/
│ ├── SKILL.md
│ ├── scripts/
│ │ └── generate.py
│ └── templates/
│ └── doraemon.md
├── podcast-generation/
│ ├── SKILL.md
│ ├── scripts/
│ │ └── generate.py
│ └── templates/
│ └── tech-explainer.md
├── ppt-generation/
│ ├── SKILL.md
│ └── scripts/
│ └── generate.py
├── skill-creator/
│ ├── LICENSE.txt
│ ├── SKILL.md
│ ├── agents/
│ │ ├── analyzer.md
│ │ ├── comparator.md
│ │ └── grader.md
│ ├── assets/
│ │ └── eval_review.html
│ ├── eval-viewer/
│ │ ├── generate_review.py
│ │ └── viewer.html
│ ├── references/
│ │ ├── output-patterns.md
│ │ ├── schemas.md
│ │ └── workflows.md
│ └── scripts/
│ ├── aggregate_benchmark.py
│ ├── generate_report.py
│ ├── improve_description.py
│ ├── init_skill.py
│ ├── package_skill.py
│ ├── quick_validate.py
│ ├── run_eval.py
│ ├── run_loop.py
│ └── utils.py
├── surprise-me/
│ └── SKILL.md
├── vercel-deploy-claimable/
│ ├── SKILL.md
│ └── scripts/
│ └── deploy.sh
├── video-generation/
│ ├── SKILL.md
│ └── scripts/
│ └── generate.py
└── web-design-guidelines/
└── SKILL.md
Showing preview only (252K chars total). Download the full file or copy to clipboard to get everything.
SYMBOL INDEX (2496 symbols across 370 files)
FILE: backend/app/channels/base.py
class Channel (line 14) | class Channel(ABC):
method __init__ (line 24) | def __init__(self, name: str, bus: MessageBus, config: dict[str, Any])...
method is_running (line 31) | def is_running(self) -> bool:
method start (line 37) | async def start(self) -> None:
method stop (line 41) | async def stop(self) -> None:
method send (line 47) | async def send(self, msg: OutboundMessage) -> None:
method send_file (line 54) | async def send_file(self, msg: OutboundMessage, attachment: ResolvedAt...
method _make_inbound (line 64) | def _make_inbound(
method _on_outbound (line 87) | async def _on_outbound(self, msg: OutboundMessage) -> None:
FILE: backend/app/channels/feishu.py
class FeishuChannel (line 17) | class FeishuChannel(Channel):
method __init__ (line 35) | def __init__(self, bus: MessageBus, config: dict[str, Any]) -> None:
method start (line 53) | async def start(self) -> None:
method _run_ws (line 118) | def _run_ws(self, app_id: str, app_secret: str) -> None:
method stop (line 154) | async def stop(self) -> None:
method send (line 168) | async def send(self, msg: OutboundMessage, *, _max_retries: int = 3) -...
method send_file (line 201) | async def send_file(self, msg: OutboundMessage, attachment: ResolvedAt...
method _upload_image (line 236) | async def _upload_image(self, path) -> str:
method _upload_file (line 245) | async def _upload_file(self, path, filename: str) -> str:
method _build_card_content (line 269) | def _build_card_content(text: str) -> str:
method _add_reaction (line 283) | async def _add_reaction(self, message_id: str, emoji_type: str = "THUM...
method _reply_card (line 294) | async def _reply_card(self, message_id: str, text: str) -> str | None:
method _create_card (line 305) | async def _create_card(self, chat_id: str, text: str) -> None:
method _update_card (line 314) | async def _update_card(self, message_id: str, text: str) -> None:
method _track_background_task (line 323) | def _track_background_task(self, task: asyncio.Task, *, name: str, msg...
method _finalize_background_task (line 328) | def _finalize_background_task(self, task: asyncio.Task, name: str, msg...
method _create_running_card (line 332) | async def _create_running_card(self, source_message_id: str, text: str...
method _ensure_running_card_started (line 342) | def _ensure_running_card_started(self, source_message_id: str, text: s...
method _finalize_running_card_task (line 357) | def _finalize_running_card_task(self, source_message_id: str, task: as...
method _ensure_running_card (line 362) | async def _ensure_running_card(self, source_message_id: str, text: str...
method _send_running_reply (line 373) | async def _send_running_reply(self, message_id: str) -> None:
method _send_card_message (line 380) | async def _send_card_message(self, msg: OutboundMessage) -> None:
method _log_future_error (line 426) | def _log_future_error(fut, name: str, msg_id: str) -> None:
method _log_task_error (line 436) | def _log_task_error(task: asyncio.Task, name: str, msg_id: str) -> None:
method _prepare_inbound (line 447) | async def _prepare_inbound(self, msg_id: str, inbound) -> None:
method _on_message (line 454) | def _on_message(self, event) -> None:
FILE: backend/app/channels/manager.py
function _as_dict (line 36) | def _as_dict(value: Any) -> dict[str, Any]:
function _merge_dicts (line 40) | def _merge_dicts(*layers: Any) -> dict[str, Any]:
function _extract_response_text (line 48) | def _extract_response_text(result: dict | list) -> str:
function _extract_text_content (line 103) | def _extract_text_content(content: Any) -> str:
function _merge_stream_text (line 129) | def _merge_stream_text(existing: str, chunk: str) -> str:
function _extract_stream_message_id (line 142) | def _extract_stream_message_id(payload: Any, metadata: Any) -> str | None:
function _accumulate_stream_text (line 158) | def _accumulate_stream_text(
function _extract_artifacts (line 195) | def _extract_artifacts(result: dict | list) -> list[str]:
function _format_artifact_text (line 228) | def _format_artifact_text(artifacts: list[str]) -> str:
function _resolve_attachments (line 241) | def _resolve_attachments(thread_id: str, artifacts: list[str]) -> list[R...
function _prepare_artifact_delivery (line 290) | def _prepare_artifact_delivery(
class ChannelManager (line 317) | class ChannelManager:
method __init__ (line 325) | def __init__(
method _channel_supports_streaming (line 351) | def _channel_supports_streaming(channel_name: str) -> bool:
method _resolve_session_layer (line 354) | def _resolve_session_layer(self, msg: InboundMessage) -> tuple[dict[st...
method _resolve_run_params (line 360) | def _resolve_run_params(self, msg: InboundMessage, thread_id: str) -> ...
method _get_client (line 386) | def _get_client(self):
method start (line 396) | async def start(self) -> None:
method stop (line 405) | async def stop(self) -> None:
method _dispatch_loop (line 419) | async def _dispatch_loop(self) -> None:
method _log_task_error (line 440) | def _log_task_error(task: asyncio.Task) -> None:
method _handle_message (line 448) | async def _handle_message(self, msg: InboundMessage) -> None:
method _create_thread (line 465) | async def _create_thread(self, client, msg: InboundMessage) -> str:
method _handle_chat (line 479) | async def _handle_chat(self, msg: InboundMessage, extra_context: dict[...
method _handle_streaming_chat (line 546) | async def _handle_streaming_chat(
method _handle_command (line 645) | async def _handle_command(self, msg: InboundMessage) -> None:
method _fetch_gateway (line 700) | async def _fetch_gateway(self, path: str, kind: str) -> str:
method _send_error (line 723) | async def _send_error(self, msg: InboundMessage, error_text: str) -> N...
FILE: backend/app/channels/message_bus.py
class InboundMessageType (line 22) | class InboundMessageType(StrEnum):
class InboundMessage (line 30) | class InboundMessage:
class ResolvedAttachment (line 62) | class ResolvedAttachment:
class OutboundMessage (line 83) | class OutboundMessage:
class MessageBus (line 117) | class MessageBus:
method __init__ (line 125) | def __init__(self) -> None:
method publish_inbound (line 131) | async def publish_inbound(self, msg: InboundMessage) -> None:
method get_inbound (line 142) | async def get_inbound(self) -> InboundMessage:
method inbound_queue (line 147) | def inbound_queue(self) -> asyncio.Queue[InboundMessage]:
method subscribe_outbound (line 152) | def subscribe_outbound(self, callback: OutboundCallback) -> None:
method unsubscribe_outbound (line 156) | def unsubscribe_outbound(self, callback: OutboundCallback) -> None:
method publish_outbound (line 160) | async def publish_outbound(self, msg: OutboundMessage) -> None:
FILE: backend/app/channels/service.py
class ChannelService (line 22) | class ChannelService:
method __init__ (line 29) | def __init__(self, channels_config: dict[str, Any] | None = None) -> N...
method from_app_config (line 50) | def from_app_config(cls) -> ChannelService:
method start (line 62) | async def start(self) -> None:
method stop (line 81) | async def stop(self) -> None:
method restart_channel (line 95) | async def restart_channel(self, name: str) -> bool:
method _start_channel (line 111) | async def _start_channel(self, name: str, config: dict[str, Any]) -> b...
method get_status (line 136) | def get_status(self) -> dict[str, Any]:
function get_channel_service (line 158) | def get_channel_service() -> ChannelService | None:
function start_channel_service (line 163) | async def start_channel_service() -> ChannelService:
function stop_channel_service (line 173) | async def stop_channel_service() -> None:
FILE: backend/app/channels/slack.py
class SlackChannel (line 19) | class SlackChannel(Channel):
method __init__ (line 28) | def __init__(self, bus: MessageBus, config: dict[str, Any]) -> None:
method start (line 35) | async def start(self) -> None:
method stop (line 72) | async def stop(self) -> None:
method send (line 80) | async def send(self, msg: OutboundMessage, *, _max_retries: int = 3) -...
method send_file (line 131) | async def send_file(self, msg: OutboundMessage, attachment: ResolvedAt...
method _add_reaction (line 154) | def _add_reaction(self, channel_id: str, timestamp: str, emoji: str) -...
method _send_running_reply (line 168) | def _send_running_reply(self, channel_id: str, thread_ts: str) -> None:
method _on_socket_event (line 182) | def _on_socket_event(self, client, req) -> None:
method _handle_message_event (line 203) | def _handle_message_event(self, event: dict) -> None:
FILE: backend/app/channels/store.py
class ChannelStore (line 16) | class ChannelStore:
method __init__ (line 36) | def __init__(self, path: str | Path | None = None) -> None:
method _load (line 48) | def _load(self) -> dict[str, dict[str, Any]]:
method _save (line 56) | def _save(self) -> None:
method _key (line 75) | def _key(channel_name: str, chat_id: str, topic_id: str | None = None)...
method get_thread_id (line 82) | def get_thread_id(self, channel_name: str, chat_id: str, topic_id: str...
method set_thread_id (line 87) | def set_thread_id(
method remove (line 109) | def remove(self, channel_name: str, chat_id: str, topic_id: str | None...
method list_entries (line 139) | def list_entries(self, channel_name: str | None = None) -> list[dict[s...
FILE: backend/app/channels/telegram.py
class TelegramChannel (line 16) | class TelegramChannel(Channel):
method __init__ (line 24) | def __init__(self, bus: MessageBus, config: dict[str, Any]) -> None:
method start (line 39) | async def start(self) -> None:
method stop (line 79) | async def stop(self) -> None:
method send (line 90) | async def send(self, msg: OutboundMessage, *, _max_retries: int = 3) -...
method send_file (line 130) | async def send_file(self, msg: OutboundMessage, attachment: ResolvedAt...
method _send_running_reply (line 174) | async def _send_running_reply(self, chat_id: str, reply_to_message_id:...
method _log_future_error (line 191) | def _log_future_error(fut, name: str, msg_id: str):
method _run_polling (line 199) | def _run_polling(self) -> None:
method _check_user (line 224) | def _check_user(self, user_id: int) -> bool:
method _cmd_start (line 229) | async def _cmd_start(self, update, context) -> None:
method _process_incoming_with_reply (line 235) | async def _process_incoming_with_reply(self, chat_id: str, msg_id: int...
method _cmd_generic (line 239) | async def _cmd_generic(self, update, context) -> None:
method _on_text (line 275) | async def _on_text(self, update, context) -> None:
FILE: backend/app/gateway/app.py
function lifespan (line 32) | async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
function create_app (line 72) | def create_app() -> FastAPI:
FILE: backend/app/gateway/config.py
class GatewayConfig (line 6) | class GatewayConfig(BaseModel):
function get_gateway_config (line 17) | def get_gateway_config() -> GatewayConfig:
FILE: backend/app/gateway/path_utils.py
function resolve_thread_virtual_path (line 10) | def resolve_thread_virtual_path(thread_id: str, virtual_path: str) -> Path:
FILE: backend/app/gateway/routers/agents.py
class AgentResponse (line 20) | class AgentResponse(BaseModel):
class AgentsListResponse (line 30) | class AgentsListResponse(BaseModel):
class AgentCreateRequest (line 36) | class AgentCreateRequest(BaseModel):
class AgentUpdateRequest (line 46) | class AgentUpdateRequest(BaseModel):
function _validate_agent_name (line 55) | def _validate_agent_name(name: str) -> None:
function _normalize_agent_name (line 71) | def _normalize_agent_name(name: str) -> str:
function _agent_config_to_response (line 76) | def _agent_config_to_response(agent_cfg: AgentConfig, include_soul: bool...
function list_agents (line 97) | async def list_agents() -> AgentsListResponse:
function check_agent_name (line 116) | async def check_agent_name(name: str) -> dict:
function get_agent (line 140) | async def get_agent(name: str) -> AgentResponse:
function create_agent_endpoint (line 172) | async def create_agent_endpoint(request: AgentCreateRequest) -> AgentRes...
function update_agent (line 233) | async def update_agent(name: str, request: AgentUpdateRequest) -> AgentR...
class UserProfileResponse (line 294) | class UserProfileResponse(BaseModel):
class UserProfileUpdateRequest (line 300) | class UserProfileUpdateRequest(BaseModel):
function get_user_profile (line 312) | async def get_user_profile() -> UserProfileResponse:
function update_user_profile (line 335) | async def update_user_profile(request: UserProfileUpdateRequest) -> User...
function delete_agent (line 361) | async def delete_agent(name: str) -> None:
FILE: backend/app/gateway/routers/artifacts.py
function is_text_file_by_content (line 17) | def is_text_file_by_content(path: Path, sample_size: int = 8192) -> bool:
function _extract_file_from_skill_archive (line 28) | def _extract_file_from_skill_archive(zip_path: Path, internal_path: str)...
function get_artifact (line 66) | async def get_artifact(thread_id: str, path: str, request: Request) -> R...
FILE: backend/app/gateway/routers/channels.py
class ChannelStatusResponse (line 15) | class ChannelStatusResponse(BaseModel):
class ChannelRestartResponse (line 20) | class ChannelRestartResponse(BaseModel):
function get_channels_status (line 26) | async def get_channels_status() -> ChannelStatusResponse:
function restart_channel (line 38) | async def restart_channel(name: str) -> ChannelRestartResponse:
FILE: backend/app/gateway/routers/mcp.py
class McpOAuthConfigResponse (line 15) | class McpOAuthConfigResponse(BaseModel):
class McpServerConfigResponse (line 34) | class McpServerConfigResponse(BaseModel):
class McpConfigResponse (line 48) | class McpConfigResponse(BaseModel):
class McpConfigUpdateRequest (line 57) | class McpConfigUpdateRequest(BaseModel):
function get_mcp_configuration (line 72) | async def get_mcp_configuration() -> McpConfigResponse:
function update_mcp_configuration (line 104) | async def update_mcp_configuration(request: McpConfigUpdateRequest) -> M...
FILE: backend/app/gateway/routers/memory.py
class ContextSection (line 12) | class ContextSection(BaseModel):
class UserContext (line 19) | class UserContext(BaseModel):
class HistoryContext (line 27) | class HistoryContext(BaseModel):
class Fact (line 35) | class Fact(BaseModel):
class MemoryResponse (line 46) | class MemoryResponse(BaseModel):
class MemoryConfigResponse (line 56) | class MemoryConfigResponse(BaseModel):
class MemoryStatusResponse (line 68) | class MemoryStatusResponse(BaseModel):
function get_memory (line 81) | async def get_memory() -> MemoryResponse:
function reload_memory (line 125) | async def reload_memory() -> MemoryResponse:
function get_memory_config_endpoint (line 144) | async def get_memory_config_endpoint() -> MemoryConfigResponse:
function get_memory_status (line 181) | async def get_memory_status() -> MemoryStatusResponse:
FILE: backend/app/gateway/routers/models.py
class ModelResponse (line 9) | class ModelResponse(BaseModel):
class ModelsListResponse (line 20) | class ModelsListResponse(BaseModel):
function list_models (line 32) | async def list_models() -> ModelsListResponse:
function get_model (line 82) | async def get_model(model_name: str) -> ModelResponse:
FILE: backend/app/gateway/routers/skills.py
function _is_unsafe_zip_member (line 21) | def _is_unsafe_zip_member(info: zipfile.ZipInfo) -> bool:
function _is_symlink_member (line 34) | def _is_symlink_member(info: zipfile.ZipInfo) -> bool:
function _safe_extract_skill_archive (line 41) | def _safe_extract_skill_archive(
class SkillResponse (line 92) | class SkillResponse(BaseModel):
class SkillsListResponse (line 102) | class SkillsListResponse(BaseModel):
class SkillUpdateRequest (line 108) | class SkillUpdateRequest(BaseModel):
class SkillInstallRequest (line 114) | class SkillInstallRequest(BaseModel):
class SkillInstallResponse (line 121) | class SkillInstallResponse(BaseModel):
function _should_ignore_archive_entry (line 129) | def _should_ignore_archive_entry(path: Path) -> bool:
function _resolve_skill_dir_from_archive_root (line 133) | def _resolve_skill_dir_from_archive_root(temp_path: Path) -> Path:
function _skill_to_response (line 142) | def _skill_to_response(skill: Skill) -> SkillResponse:
function list_skills (line 159) | async def list_skills() -> SkillsListResponse:
function get_skill (line 204) | async def get_skill(skill_name: str) -> SkillResponse:
function update_skill (line 248) | async def update_skill(skill_name: str, request: SkillUpdateRequest) -> ...
function install_skill (line 341) | async def install_skill(request: SkillInstallRequest) -> SkillInstallRes...
FILE: backend/app/gateway/routers/suggestions.py
class SuggestionMessage (line 14) | class SuggestionMessage(BaseModel):
class SuggestionsRequest (line 19) | class SuggestionsRequest(BaseModel):
class SuggestionsResponse (line 25) | class SuggestionsResponse(BaseModel):
function _strip_markdown_code_fence (line 29) | def _strip_markdown_code_fence(text: str) -> str:
function _parse_json_string_list (line 39) | def _parse_json_string_list(text: str) -> list[str] | None:
function _extract_response_text (line 63) | def _extract_response_text(content: object) -> str:
function _format_conversation (line 81) | def _format_conversation(messages: list[SuggestionMessage]) -> str:
function generate_suggestions (line 100) | async def generate_suggestions(thread_id: str, request: SuggestionsReque...
FILE: backend/app/gateway/routers/uploads.py
class UploadResponse (line 18) | class UploadResponse(BaseModel):
function get_uploads_dir (line 26) | def get_uploads_dir(thread_id: str) -> Path:
function upload_files (line 41) | async def upload_files(
function list_uploaded_files (line 132) | async def list_uploaded_files(thread_id: str) -> dict:
function delete_uploaded_file (line 167) | async def delete_uploaded_file(thread_id: str, filename: str) -> dict:
FILE: backend/debug.py
function main (line 35) | async def main():
FILE: backend/packages/harness/deerflow/agents/checkpointer/async_provider.py
function _async_checkpointer (line 42) | async def _async_checkpointer(config) -> AsyncIterator[Checkpointer]:
function make_checkpointer (line 90) | async def make_checkpointer() -> AsyncIterator[Checkpointer]:
FILE: backend/packages/harness/deerflow/agents/checkpointer/provider.py
function _resolve_sqlite_conn_str (line 47) | def _resolve_sqlite_conn_str(raw: str) -> str:
function _sync_checkpointer_cm (line 60) | def _sync_checkpointer_cm(config: CheckpointerConfig) -> Iterator[Checkp...
function get_checkpointer (line 114) | def get_checkpointer() -> Checkpointer:
function reset_checkpointer (line 160) | def reset_checkpointer() -> None:
function checkpointer_context (line 182) | def checkpointer_context() -> Iterator[Checkpointer]:
FILE: backend/packages/harness/deerflow/agents/lead_agent/agent.py
function _resolve_model_name (line 25) | def _resolve_model_name(requested_model_name: str | None = None) -> str:
function _create_summarization_middleware (line 40) | def _create_summarization_middleware() -> SummarizationMiddleware | None:
function _create_todo_list_middleware (line 82) | def _create_todo_list_middleware(is_plan_mode: bool) -> TodoMiddleware |...
function _build_middlewares (line 207) | def _build_middlewares(config: RunnableConfig, model_name: str | None, a...
function make_lead_agent (line 262) | def make_lead_agent(config: RunnableConfig):
FILE: backend/packages/harness/deerflow/agents/lead_agent/prompt.py
function _build_subagent_section (line 7) | def _build_subagent_section(max_concurrent: int) -> str:
function _get_memory_context (line 338) | def _get_memory_context(agent_name: str | None = None) -> str:
function get_skills_prompt_section (line 370) | def get_skills_prompt_section(available_skills: set[str] | None = None) ...
function get_agent_soul (line 414) | def get_agent_soul(agent_name: str | None) -> str:
function get_deferred_tools_prompt_section (line 422) | def get_deferred_tools_prompt_section() -> str:
function apply_prompt_template (line 447) | def apply_prompt_template(subagent_enabled: bool = False, max_concurrent...
FILE: backend/packages/harness/deerflow/agents/memory/prompt.py
function _count_tokens (line 148) | def _count_tokens(text: str, encoding_name: str = "cl100k_base") -> int:
function _coerce_confidence (line 170) | def _coerce_confidence(value: Any, default: float = 0.0) -> float:
function format_memory_for_injection (line 186) | def format_memory_for_injection(memory_data: dict[str, Any], max_tokens:...
function format_conversation_for_update (line 303) | def format_conversation_for_update(messages: list[Any]) -> str:
FILE: backend/packages/harness/deerflow/agents/memory/queue.py
class ConversationContext (line 13) | class ConversationContext:
class MemoryUpdateQueue (line 22) | class MemoryUpdateQueue:
method __init__ (line 30) | def __init__(self):
method add (line 37) | def add(self, thread_id: str, messages: list[Any], agent_name: str | N...
method _reset_timer (line 66) | def _reset_timer(self) -> None:
method _process_queue (line 84) | def _process_queue(self) -> None:
method flush (line 131) | def flush(self) -> None:
method clear (line 143) | def clear(self) -> None:
method pending_count (line 156) | def pending_count(self) -> int:
method is_processing (line 162) | def is_processing(self) -> bool:
function get_memory_queue (line 173) | def get_memory_queue() -> MemoryUpdateQueue:
function reset_memory_queue (line 186) | def reset_memory_queue() -> None:
FILE: backend/packages/harness/deerflow/agents/memory/updater.py
function _get_memory_file_path (line 22) | def _get_memory_file_path(agent_name: str | None = None) -> Path:
function _create_empty_memory (line 43) | def _create_empty_memory() -> dict[str, Any]:
function get_memory_data (line 67) | def get_memory_data(agent_name: str | None = None) -> dict[str, Any]:
function reload_memory_data (line 98) | def reload_memory_data(agent_name: str | None = None) -> dict[str, Any]:
function _extract_text (line 119) | def _extract_text(content: Any) -> str:
function _load_memory_from_file (line 156) | def _load_memory_from_file(agent_name: str | None = None) -> dict[str, A...
function _strip_upload_mentions_from_memory (line 193) | def _strip_upload_mentions_from_memory(memory_data: dict[str, Any]) -> d...
function _fact_content_key (line 216) | def _fact_content_key(content: Any) -> str | None:
function _save_memory_to_file (line 225) | def _save_memory_to_file(memory_data: dict[str, Any], agent_name: str | ...
class MemoryUpdater (line 267) | class MemoryUpdater:
method __init__ (line 270) | def __init__(self, model_name: str | None = None):
method _get_model (line 278) | def _get_model(self):
method update_memory (line 284) | def update_memory(self, messages: list[Any], thread_id: str | None = N...
method _apply_updates (line 350) | def _apply_updates(
function update_memory_from_conversation (line 437) | def update_memory_from_conversation(messages: list[Any], thread_id: str ...
FILE: backend/packages/harness/deerflow/agents/middlewares/clarification_middleware.py
class ClarificationMiddlewareState (line 14) | class ClarificationMiddlewareState(AgentState):
class ClarificationMiddleware (line 20) | class ClarificationMiddleware(AgentMiddleware[ClarificationMiddlewareSta...
method _is_chinese (line 35) | def _is_chinese(self, text: str) -> bool:
method _format_clarification_message (line 46) | def _format_clarification_message(self, args: dict) -> str:
method _handle_clarification (line 91) | def _handle_clarification(self, request: ToolCallRequest) -> Command:
method wrap_tool_call (line 132) | def wrap_tool_call(
method awrap_tool_call (line 154) | async def awrap_tool_call(
FILE: backend/packages/harness/deerflow/agents/middlewares/dangling_tool_call_middleware.py
class DanglingToolCallMiddleware (line 28) | class DanglingToolCallMiddleware(AgentMiddleware[AgentState]):
method _build_patched_messages (line 36) | def _build_patched_messages(self, messages: list) -> list | None:
method wrap_model_call (line 91) | def wrap_model_call(
method awrap_model_call (line 102) | async def awrap_model_call(
FILE: backend/packages/harness/deerflow/agents/middlewares/deferred_tool_filter_middleware.py
class DeferredToolFilterMiddleware (line 23) | class DeferredToolFilterMiddleware(AgentMiddleware[AgentState]):
method _filter_tools (line 31) | def _filter_tools(self, request: ModelRequest) -> ModelRequest:
method wrap_model_call (line 47) | def wrap_model_call(
method awrap_model_call (line 55) | async def awrap_model_call(
FILE: backend/packages/harness/deerflow/agents/middlewares/loop_detection_middleware.py
function _hash_tool_calls (line 36) | def _hash_tool_calls(tool_calls: list[dict]) -> str:
class LoopDetectionMiddleware (line 76) | class LoopDetectionMiddleware(AgentMiddleware[AgentState]):
method __init__ (line 90) | def __init__(
method _get_thread_id (line 107) | def _get_thread_id(self, runtime: Runtime) -> str:
method _evict_if_needed (line 114) | def _evict_if_needed(self) -> None:
method _track_and_check (line 124) | def _track_and_check(self, state: AgentState, runtime: Runtime) -> tup...
method _apply (line 192) | def _apply(self, state: AgentState, runtime: Runtime) -> dict | None:
method after_model (line 212) | def after_model(self, state: AgentState, runtime: Runtime) -> dict | N...
method aafter_model (line 216) | async def aafter_model(self, state: AgentState, runtime: Runtime) -> d...
method reset (line 219) | def reset(self, thread_id: str | None = None) -> None:
FILE: backend/packages/harness/deerflow/agents/middlewares/memory_middleware.py
class MemoryMiddlewareState (line 14) | class MemoryMiddlewareState(AgentState):
function _filter_messages_for_memory (line 20) | def _filter_messages_for_memory(messages: list[Any]) -> list[Any]:
class MemoryMiddleware (line 86) | class MemoryMiddleware(AgentMiddleware[MemoryMiddlewareState]):
method __init__ (line 98) | def __init__(self, agent_name: str | None = None):
method after_agent (line 108) | def after_agent(self, state: MemoryMiddlewareState, runtime: Runtime) ...
FILE: backend/packages/harness/deerflow/agents/middlewares/subagent_limit_middleware.py
function _clamp_subagent_limit (line 19) | def _clamp_subagent_limit(value: int) -> int:
class SubagentLimitMiddleware (line 24) | class SubagentLimitMiddleware(AgentMiddleware[AgentState]):
method __init__ (line 36) | def __init__(self, max_concurrent: int = MAX_CONCURRENT_SUBAGENTS):
method _truncate_task_calls (line 40) | def _truncate_task_calls(self, state: AgentState) -> dict | None:
method after_model (line 70) | def after_model(self, state: AgentState, runtime: Runtime) -> dict | N...
method aafter_model (line 74) | async def aafter_model(self, state: AgentState, runtime: Runtime) -> d...
FILE: backend/packages/harness/deerflow/agents/middlewares/thread_data_middleware.py
class ThreadDataMiddlewareState (line 12) | class ThreadDataMiddlewareState(AgentState):
class ThreadDataMiddleware (line 18) | class ThreadDataMiddleware(AgentMiddleware[ThreadDataMiddlewareState]):
method __init__ (line 33) | def __init__(self, base_dir: str | None = None, lazy_init: bool = True):
method _get_thread_paths (line 46) | def _get_thread_paths(self, thread_id: str) -> dict[str, str]:
method _create_thread_directories (line 61) | def _create_thread_directories(self, thread_id: str) -> dict[str, str]:
method before_agent (line 74) | def before_agent(self, state: ThreadDataMiddlewareState, runtime: Runt...
FILE: backend/packages/harness/deerflow/agents/middlewares/title_middleware.py
class TitleMiddlewareState (line 16) | class TitleMiddlewareState(AgentState):
class TitleMiddleware (line 22) | class TitleMiddleware(AgentMiddleware[TitleMiddlewareState]):
method _normalize_content (line 27) | def _normalize_content(self, content: object) -> str:
method _should_generate_title (line 46) | def _should_generate_title(self, state: TitleMiddlewareState) -> bool:
method _build_title_prompt (line 68) | def _build_title_prompt(self, state: TitleMiddlewareState) -> tuple[st...
method _parse_title (line 89) | def _parse_title(self, content: object) -> str:
method _fallback_title (line 96) | def _fallback_title(self, user_msg: str) -> str:
method _generate_title_result (line 103) | def _generate_title_result(self, state: TitleMiddlewareState) -> dict ...
method _agenerate_title_result (line 123) | async def _agenerate_title_result(self, state: TitleMiddlewareState) -...
method after_model (line 144) | def after_model(self, state: TitleMiddlewareState, runtime: Runtime) -...
method aafter_model (line 148) | async def aafter_model(self, state: TitleMiddlewareState, runtime: Run...
FILE: backend/packages/harness/deerflow/agents/middlewares/todo_middleware.py
function _todos_in_messages (line 19) | def _todos_in_messages(messages: list[Any]) -> bool:
function _reminder_in_messages (line 29) | def _reminder_in_messages(messages: list[Any]) -> bool:
function _format_todos (line 37) | def _format_todos(todos: list[Todo]) -> str:
class TodoMiddleware (line 47) | class TodoMiddleware(TodoListMiddleware):
method before_model (line 57) | def before_model(
method abefore_model (line 94) | async def abefore_model(
FILE: backend/packages/harness/deerflow/agents/middlewares/tool_error_handling_middleware.py
class ToolErrorHandlingMiddleware (line 19) | class ToolErrorHandlingMiddleware(AgentMiddleware[AgentState]):
method _build_error_message (line 22) | def _build_error_message(self, request: ToolCallRequest, exc: Exceptio...
method wrap_tool_call (line 38) | def wrap_tool_call(
method awrap_tool_call (line 53) | async def awrap_tool_call(
function _build_runtime_middlewares (line 68) | def _build_runtime_middlewares(
function build_lead_runtime_middlewares (line 97) | def build_lead_runtime_middlewares(*, lazy_init: bool = True) -> list[Ag...
function build_subagent_runtime_middlewares (line 106) | def build_subagent_runtime_middlewares(*, lazy_init: bool = True) -> lis...
FILE: backend/packages/harness/deerflow/agents/middlewares/uploads_middleware.py
class UploadsMiddlewareState (line 17) | class UploadsMiddlewareState(AgentState):
class UploadsMiddleware (line 23) | class UploadsMiddleware(AgentMiddleware[UploadsMiddlewareState]):
method __init__ (line 33) | def __init__(self, base_dir: str | None = None):
method _create_files_message (line 42) | def _create_files_message(self, new_files: list[dict], historical_file...
method _files_from_kwargs (line 81) | def _files_from_kwargs(self, message: HumanMessage, uploads_dir: Path ...
method before_agent (line 120) | def before_agent(self, state: UploadsMiddlewareState, runtime: Runtime...
FILE: backend/packages/harness/deerflow/agents/middlewares/view_image_middleware.py
class ViewImageMiddlewareState (line 13) | class ViewImageMiddlewareState(AgentState):
class ViewImageMiddleware (line 19) | class ViewImageMiddleware(AgentMiddleware[ViewImageMiddlewareState]):
method _get_last_assistant_message (line 35) | def _get_last_assistant_message(self, messages: list) -> AIMessage | N...
method _has_view_image_tool (line 49) | def _has_view_image_tool(self, message: AIMessage) -> bool:
method _all_tools_completed (line 63) | def _all_tools_completed(self, messages: list, assistant_msg: AIMessag...
method _create_image_details_message (line 94) | def _create_image_details_message(self, state: ViewImageMiddlewareStat...
method _should_inject_image_message (line 128) | def _should_inject_image_message(self, state: ViewImageMiddlewareState...
method _inject_image_message (line 166) | def _inject_image_message(self, state: ViewImageMiddlewareState) -> di...
method before_model (line 190) | def before_model(self, state: ViewImageMiddlewareState, runtime: Runti...
method abefore_model (line 207) | async def abefore_model(self, state: ViewImageMiddlewareState, runtime...
FILE: backend/packages/harness/deerflow/agents/thread_state.py
class SandboxState (line 6) | class SandboxState(TypedDict):
class ThreadDataState (line 10) | class ThreadDataState(TypedDict):
class ViewedImageData (line 16) | class ViewedImageData(TypedDict):
function merge_artifacts (line 21) | def merge_artifacts(existing: list[str] | None, new: list[str] | None) -...
function merge_viewed_images (line 31) | def merge_viewed_images(existing: dict[str, ViewedImageData] | None, new...
class ThreadState (line 48) | class ThreadState(AgentState):
FILE: backend/packages/harness/deerflow/client.py
class StreamEvent (line 50) | class StreamEvent:
class DeerFlowClient (line 67) | class DeerFlowClient:
method __init__ (line 101) | def __init__(
method reset_agent (line 145) | def reset_agent(self) -> None:
method _atomic_write_json (line 160) | def _atomic_write_json(path: Path, data: dict) -> None:
method _get_runnable_config (line 177) | def _get_runnable_config(self, thread_id: str, **overrides) -> Runnabl...
method _ensure_agent (line 191) | def _ensure_agent(self, config: RunnableConfig):
method _get_tools (line 233) | def _get_tools(*, model_name: str | None, subagent_enabled: bool):
method _serialize_message (line 240) | def _serialize_message(msg) -> dict:
method _extract_text (line 264) | def _extract_text(content) -> str:
method stream (line 309) | def stream(
method chat (line 419) | def chat(self, message: str, *, thread_id: str | None = None, **kwargs...
method list_models (line 447) | def list_models(self) -> dict:
method list_skills (line 468) | def list_skills(self, enabled_only: bool = False) -> dict:
method get_memory (line 493) | def get_memory(self) -> dict:
method get_model (line 503) | def get_model(self, name: str) -> dict | None:
method get_mcp_config (line 529) | def get_mcp_config(self) -> dict:
method update_mcp_config (line 539) | def update_mcp_config(self, mcp_servers: dict[str, dict]) -> dict:
method get_skill (line 576) | def get_skill(self, name: str) -> dict | None:
method update_skill (line 598) | def update_skill(self, name: str, *, enabled: bool) -> dict:
method install_skill (line 647) | def install_skill(self, skill_path: str | Path) -> dict:
method reload_memory (line 715) | def reload_memory(self) -> dict:
method get_memory_config (line 725) | def get_memory_config(self) -> dict:
method get_memory_status (line 744) | def get_memory_status(self) -> dict:
method _get_uploads_dir (line 760) | def _get_uploads_dir(thread_id: str) -> Path:
method upload_files (line 766) | def upload_files(self, thread_id: str, files: list[str | Path]) -> dict:
method list_uploads (line 861) | def list_uploads(self, thread_id: str) -> dict:
method delete_upload (line 895) | def delete_upload(self, thread_id: str, filename: str) -> dict:
method get_artifact (line 928) | def get_artifact(self, thread_id: str, path: str) -> tuple[bytes, str]:
FILE: backend/packages/harness/deerflow/community/aio_sandbox/aio_sandbox.py
class AioSandbox (line 11) | class AioSandbox(Sandbox):
method __init__ (line 17) | def __init__(self, id: str, base_url: str, home_dir: str | None = None):
method base_url (line 31) | def base_url(self) -> str:
method home_dir (line 35) | def home_dir(self) -> str:
method execute_command (line 42) | def execute_command(self, command: str) -> str:
method read_file (line 59) | def read_file(self, path: str) -> str:
method list_dir (line 75) | def list_dir(self, path: str, max_depth: int = 2) -> list[str]:
method write_file (line 97) | def write_file(self, path: str, content: str, append: bool = False) ->...
method update_file (line 116) | def update_file(self, path: str, content: bytes) -> None:
FILE: backend/packages/harness/deerflow/community/aio_sandbox/aio_sandbox_provider.py
class AioSandboxProvider (line 45) | class AioSandboxProvider(SandboxProvider):
method __init__ (line 69) | def __init__(self):
method _create_backend (line 98) | def _create_backend(self) -> SandboxBackend:
method _load_config (line 123) | def _load_config(self) -> dict:
method _resolve_env_vars (line 144) | def _resolve_env_vars(env_config: dict[str, str]) -> dict[str, str]:
method _deterministic_sandbox_id (line 158) | def _deterministic_sandbox_id(thread_id: str) -> str:
method _get_extra_mounts (line 168) | def _get_extra_mounts(self, thread_id: str | None) -> list[tuple[str, ...
method _get_thread_mounts (line 184) | def _get_thread_mounts(thread_id: str) -> list[tuple[str, str, bool]]:
method _get_skills_mount (line 205) | def _get_skills_mount() -> tuple[str, str, bool] | None:
method _start_idle_checker (line 226) | def _start_idle_checker(self) -> None:
method _idle_checker_loop (line 236) | def _idle_checker_loop(self) -> None:
method _cleanup_idle_sandboxes (line 244) | def _cleanup_idle_sandboxes(self, idle_timeout: float) -> None:
method _register_signal_handlers (line 296) | def _register_signal_handlers(self) -> None:
method _get_thread_lock (line 318) | def _get_thread_lock(self, thread_id: str) -> threading.Lock:
method acquire (line 327) | def acquire(self, thread_id: str | None = None) -> str:
method _acquire_internal (line 349) | def _acquire_internal(self, thread_id: str | None) -> str:
method _discover_or_create_with_lock (line 394) | def _discover_or_create_with_lock(self, thread_id: str, sandbox_id: st...
method _evict_oldest_warm (line 442) | def _evict_oldest_warm(self) -> str | None:
method _create_sandbox (line 462) | def _create_sandbox(self, thread_id: str | None, sandbox_id: str) -> str:
method get (line 510) | def get(self, sandbox_id: str) -> Sandbox | None:
method release (line 525) | def release(self, sandbox_id: str) -> None:
method destroy (line 551) | def destroy(self, sandbox_id: str) -> None:
method shutdown (line 580) | def shutdown(self) -> None:
FILE: backend/packages/harness/deerflow/community/aio_sandbox/backend.py
function wait_for_sandbox_ready (line 16) | def wait_for_sandbox_ready(sandbox_url: str, timeout: int = 30) -> bool:
class SandboxBackend (line 38) | class SandboxBackend(ABC):
method create (line 47) | def create(self, thread_id: str, sandbox_id: str, extra_mounts: list[t...
method destroy (line 62) | def destroy(self, info: SandboxInfo) -> None:
method is_alive (line 71) | def is_alive(self, info: SandboxInfo) -> bool:
method discover (line 86) | def discover(self, sandbox_id: str) -> SandboxInfo | None:
FILE: backend/packages/harness/deerflow/community/aio_sandbox/local_backend.py
class LocalContainerBackend (line 21) | class LocalContainerBackend(SandboxBackend):
method __init__ (line 34) | def __init__(
method runtime (line 60) | def runtime(self) -> str:
method _detect_runtime (line 64) | def _detect_runtime(self) -> str:
method create (line 93) | def create(self, thread_id: str, sandbox_id: str, extra_mounts: list[t...
method destroy (line 153) | def destroy(self, info: SandboxInfo) -> None:
method is_alive (line 167) | def is_alive(self, info: SandboxInfo) -> bool:
method discover (line 173) | def discover(self, sandbox_id: str) -> SandboxInfo | None:
method _start_container (line 207) | def _start_container(
method _stop_container (line 275) | def _stop_container(self, container_id: str) -> None:
method _is_container_running (line 288) | def _is_container_running(self, container_name: str) -> bool:
method _get_container_port (line 305) | def _get_container_port(self, container_name: str) -> int | None:
FILE: backend/packages/harness/deerflow/community/aio_sandbox/remote_backend.py
class RemoteSandboxBackend (line 30) | class RemoteSandboxBackend(SandboxBackend):
method __init__ (line 43) | def __init__(self, provisioner_url: str):
method provisioner_url (line 53) | def provisioner_url(self) -> str:
method create (line 58) | def create(
method destroy (line 71) | def destroy(self, info: SandboxInfo) -> None:
method is_alive (line 75) | def is_alive(self, info: SandboxInfo) -> bool:
method discover (line 79) | def discover(self, sandbox_id: str) -> SandboxInfo | None:
method _provisioner_create (line 89) | def _provisioner_create(self, thread_id: str, sandbox_id: str, extra_m...
method _provisioner_destroy (line 111) | def _provisioner_destroy(self, sandbox_id: str) -> None:
method _provisioner_is_alive (line 125) | def _provisioner_is_alive(self, sandbox_id: str) -> bool:
method _provisioner_discover (line 139) | def _provisioner_discover(self, sandbox_id: str) -> SandboxInfo | None:
FILE: backend/packages/harness/deerflow/community/aio_sandbox/sandbox_info.py
class SandboxInfo (line 10) | class SandboxInfo:
method to_dict (line 24) | def to_dict(self) -> dict:
method from_dict (line 34) | def from_dict(cls, data: dict) -> SandboxInfo:
FILE: backend/packages/harness/deerflow/community/firecrawl/tools.py
function _get_firecrawl_client (line 9) | def _get_firecrawl_client() -> FirecrawlApp:
function web_search_tool (line 18) | def web_search_tool(query: str) -> str:
function web_fetch_tool (line 50) | def web_fetch_tool(url: str) -> str:
FILE: backend/packages/harness/deerflow/community/image_search/tools.py
function _search_images (line 15) | def _search_images(
function image_search_tool (line 78) | def image_search_tool(
FILE: backend/packages/harness/deerflow/community/infoquest/infoquest_client.py
class InfoQuestClient (line 17) | class InfoQuestClient:
method __init__ (line 20) | def __init__(self, fetch_time: int = -1, fetch_timeout: int = -1, fetc...
method fetch (line 45) | def fetch(self, url: str, return_format: str = "html") -> str:
method _prepare_headers (line 110) | def _prepare_headers() -> dict[str, str]:
method _prepare_crawl_request_data (line 125) | def _prepare_crawl_request_data(self, url: str, return_format: str) ->...
method web_search_raw_results (line 151) | def web_search_raw_results(
method clean_results (line 179) | def clean_results(raw_results: list[dict[str, dict[str, dict[str, Any]...
method web_search (line 234) | def web_search(
method clean_results_with_image_search (line 286) | def clean_results_with_image_search(raw_results: list[dict[str, dict[s...
method image_search_raw_results (line 315) | def image_search_raw_results(
method image_search (line 353) | def image_search(
FILE: backend/packages/harness/deerflow/community/infoquest/tools.py
function _get_infoquest_client (line 11) | def _get_infoquest_client() -> InfoQuestClient:
function web_search_tool (line 49) | def web_search_tool(query: str) -> str:
function web_fetch_tool (line 61) | def web_fetch_tool(url: str) -> str:
function image_search_tool (line 80) | def image_search_tool(query: str) -> str:
FILE: backend/packages/harness/deerflow/community/jina_ai/jina_client.py
class JinaClient (line 9) | class JinaClient:
method crawl (line 10) | def crawl(self, url: str, return_format: str = "html", timeout: int = ...
FILE: backend/packages/harness/deerflow/community/jina_ai/tools.py
function web_fetch_tool (line 11) | def web_fetch_tool(url: str) -> str:
FILE: backend/packages/harness/deerflow/community/tavily/tools.py
function _get_tavily_client (line 9) | def _get_tavily_client() -> TavilyClient:
function web_search_tool (line 18) | def web_search_tool(query: str) -> str:
function web_fetch_tool (line 44) | def web_fetch_tool(url: str) -> str:
FILE: backend/packages/harness/deerflow/config/agents_config.py
class AgentConfig (line 18) | class AgentConfig(BaseModel):
function load_agent_config (line 27) | def load_agent_config(name: str | None) -> AgentConfig | None:
function load_agent_soul (line 72) | def load_agent_soul(agent_name: str | None) -> str | None:
function list_custom_agents (line 92) | def list_custom_agents() -> list[AgentConfig]:
FILE: backend/packages/harness/deerflow/config/app_config.py
class AppConfig (line 27) | class AppConfig(BaseModel):
method resolve_config_path (line 41) | def resolve_config_path(cls, config_path: str | None = None) -> Path:
method from_file (line 70) | def from_file(cls, config_path: str | None = None) -> Self:
method _check_config_version (line 122) | def _check_config_version(cls, config_data: dict, config_path: Path) -...
method resolve_env_variables (line 168) | def resolve_env_variables(cls, config: Any) -> Any:
method get_model_config (line 192) | def get_model_config(self, name: str) -> ModelConfig | None:
method get_tool_config (line 203) | def get_tool_config(self, name: str) -> ToolConfig | None:
method get_tool_group_config (line 214) | def get_tool_group_config(self, name: str) -> ToolGroupConfig | None:
function _get_config_mtime (line 232) | def _get_config_mtime(config_path: Path) -> float | None:
function _load_and_cache_app_config (line 240) | def _load_and_cache_app_config(config_path: str | None = None) -> AppCon...
function get_app_config (line 252) | def get_app_config() -> AppConfig:
function reload_app_config (line 289) | def reload_app_config(config_path: str | None = None) -> AppConfig:
function reset_app_config (line 305) | def reset_app_config() -> None:
function set_app_config (line 319) | def set_app_config(config: AppConfig) -> None:
FILE: backend/packages/harness/deerflow/config/checkpointer_config.py
class CheckpointerConfig (line 10) | class CheckpointerConfig(BaseModel):
function get_checkpointer_config (line 32) | def get_checkpointer_config() -> CheckpointerConfig | None:
function set_checkpointer_config (line 37) | def set_checkpointer_config(config: CheckpointerConfig | None) -> None:
function load_checkpointer_config_from_dict (line 43) | def load_checkpointer_config_from_dict(config_dict: dict) -> None:
FILE: backend/packages/harness/deerflow/config/extensions_config.py
class McpOAuthConfig (line 11) | class McpOAuthConfig(BaseModel):
class McpServerConfig (line 34) | class McpServerConfig(BaseModel):
class SkillStateConfig (line 49) | class SkillStateConfig(BaseModel):
class ExtensionsConfig (line 55) | class ExtensionsConfig(BaseModel):
method resolve_config_path (line 70) | def resolve_config_path(cls, config_path: str | None = None) -> Path |...
method from_file (line 120) | def from_file(cls, config_path: str | None = None) -> "ExtensionsConfig":
method resolve_env_variables (line 147) | def resolve_env_variables(cls, config: dict[str, Any]) -> dict[str, Any]:
method get_enabled_mcp_servers (line 177) | def get_enabled_mcp_servers(self) -> dict[str, McpServerConfig]:
method is_skill_enabled (line 185) | def is_skill_enabled(self, skill_name: str, skill_category: str) -> bool:
function get_extensions_config (line 205) | def get_extensions_config() -> ExtensionsConfig:
function reload_extensions_config (line 220) | def reload_extensions_config(config_path: str | None = None) -> Extensio...
function reset_extensions_config (line 238) | def reset_extensions_config() -> None:
function set_extensions_config (line 249) | def set_extensions_config(config: ExtensionsConfig) -> None:
FILE: backend/packages/harness/deerflow/config/memory_config.py
class MemoryConfig (line 6) | class MemoryConfig(BaseModel):
function get_memory_config (line 64) | def get_memory_config() -> MemoryConfig:
function set_memory_config (line 69) | def set_memory_config(config: MemoryConfig) -> None:
function load_memory_config_from_dict (line 75) | def load_memory_config_from_dict(config_dict: dict) -> None:
FILE: backend/packages/harness/deerflow/config/model_config.py
class ModelConfig (line 4) | class ModelConfig(BaseModel):
FILE: backend/packages/harness/deerflow/config/paths.py
class Paths (line 11) | class Paths:
method __init__ (line 38) | def __init__(self, base_dir: str | Path | None = None) -> None:
method host_base_dir (line 42) | def host_base_dir(self) -> Path:
method base_dir (line 57) | def base_dir(self) -> Path:
method memory_file (line 72) | def memory_file(self) -> Path:
method user_md_file (line 77) | def user_md_file(self) -> Path:
method agents_dir (line 82) | def agents_dir(self) -> Path:
method agent_dir (line 86) | def agent_dir(self, name: str) -> Path:
method agent_memory_file (line 90) | def agent_memory_file(self, name: str) -> Path:
method thread_dir (line 94) | def thread_dir(self, thread_id: str) -> Path:
method sandbox_work_dir (line 109) | def sandbox_work_dir(self, thread_id: str) -> Path:
method sandbox_uploads_dir (line 117) | def sandbox_uploads_dir(self, thread_id: str) -> Path:
method sandbox_outputs_dir (line 125) | def sandbox_outputs_dir(self, thread_id: str) -> Path:
method sandbox_user_data_dir (line 133) | def sandbox_user_data_dir(self, thread_id: str) -> Path:
method ensure_thread_dirs (line 141) | def ensure_thread_dirs(self, thread_id: str) -> None:
method resolve_virtual_path (line 158) | def resolve_virtual_path(self, thread_id: str, virtual_path: str) -> P...
function get_paths (line 199) | def get_paths() -> Paths:
function resolve_path (line 207) | def resolve_path(path: str) -> Path:
FILE: backend/packages/harness/deerflow/config/sandbox_config.py
class VolumeMountConfig (line 4) | class VolumeMountConfig(BaseModel):
class SandboxConfig (line 12) | class SandboxConfig(BaseModel):
FILE: backend/packages/harness/deerflow/config/skills_config.py
class SkillsConfig (line 6) | class SkillsConfig(BaseModel):
method get_skills_path (line 18) | def get_skills_path(self) -> Path:
method get_skill_container_path (line 38) | def get_skill_container_path(self, skill_name: str, category: str = "p...
FILE: backend/packages/harness/deerflow/config/subagents_config.py
class SubagentOverrideConfig (line 10) | class SubagentOverrideConfig(BaseModel):
class SubagentsAppConfig (line 20) | class SubagentsAppConfig(BaseModel):
method get_timeout_for (line 33) | def get_timeout_for(self, agent_name: str) -> int:
function get_subagents_app_config (line 51) | def get_subagents_app_config() -> SubagentsAppConfig:
function load_subagents_config_from_dict (line 56) | def load_subagents_config_from_dict(config_dict: dict) -> None:
FILE: backend/packages/harness/deerflow/config/summarization_config.py
class ContextSize (line 10) | class ContextSize(BaseModel):
method to_tuple (line 16) | def to_tuple(self) -> tuple[ContextSizeType, int | float]:
class SummarizationConfig (line 21) | class SummarizationConfig(BaseModel):
function get_summarization_config (line 60) | def get_summarization_config() -> SummarizationConfig:
function set_summarization_config (line 65) | def set_summarization_config(config: SummarizationConfig) -> None:
function load_summarization_config_from_dict (line 71) | def load_summarization_config_from_dict(config_dict: dict) -> None:
FILE: backend/packages/harness/deerflow/config/title_config.py
class TitleConfig (line 6) | class TitleConfig(BaseModel):
function get_title_config (line 39) | def get_title_config() -> TitleConfig:
function set_title_config (line 44) | def set_title_config(config: TitleConfig) -> None:
function load_title_config_from_dict (line 50) | def load_title_config_from_dict(config_dict: dict) -> None:
FILE: backend/packages/harness/deerflow/config/tool_config.py
class ToolGroupConfig (line 4) | class ToolGroupConfig(BaseModel):
class ToolConfig (line 11) | class ToolConfig(BaseModel):
FILE: backend/packages/harness/deerflow/config/tool_search_config.py
class ToolSearchConfig (line 6) | class ToolSearchConfig(BaseModel):
function get_tool_search_config (line 23) | def get_tool_search_config() -> ToolSearchConfig:
function load_tool_search_config_from_dict (line 31) | def load_tool_search_config_from_dict(data: dict) -> ToolSearchConfig:
FILE: backend/packages/harness/deerflow/config/tracing_config.py
class TracingConfig (line 11) | class TracingConfig(BaseModel):
method is_configured (line 20) | def is_configured(self) -> bool:
function _env_flag_preferred (line 31) | def _env_flag_preferred(*names: str) -> bool:
function _first_env_value (line 45) | def _first_env_value(*names: str) -> str | None:
function get_tracing_config (line 54) | def get_tracing_config() -> TracingConfig:
function is_tracing_enabled (line 89) | def is_tracing_enabled() -> bool:
FILE: backend/packages/harness/deerflow/mcp/cache.py
function _get_config_mtime (line 17) | def _get_config_mtime() -> float | None:
function _is_cache_stale (line 31) | def _is_cache_stale() -> bool:
function initialize_mcp_tools (line 56) | async def initialize_mcp_tools() -> list[BaseTool]:
function get_cached_mcp_tools (line 82) | def get_cached_mcp_tools() -> list[BaseTool]:
function reset_mcp_tools_cache (line 129) | def reset_mcp_tools_cache() -> None:
FILE: backend/packages/harness/deerflow/mcp/client.py
function build_server_params (line 11) | def build_server_params(server_name: str, config: McpServerConfig) -> di...
function build_servers_config (line 45) | def build_servers_config(extensions_config: ExtensionsConfig) -> dict[st...
FILE: backend/packages/harness/deerflow/mcp/oauth.py
class _OAuthToken (line 17) | class _OAuthToken:
class OAuthTokenManager (line 25) | class OAuthTokenManager:
method __init__ (line 28) | def __init__(self, oauth_by_server: dict[str, McpOAuthConfig]):
method from_extensions_config (line 34) | def from_extensions_config(cls, extensions_config: ExtensionsConfig) -...
method has_oauth_servers (line 41) | def has_oauth_servers(self) -> bool:
method oauth_server_names (line 44) | def oauth_server_names(self) -> list[str]:
method get_authorization_header (line 47) | async def get_authorization_header(self, server_name: str) -> str | None:
method _is_expiring (line 68) | def _is_expiring(token: _OAuthToken, oauth: McpOAuthConfig) -> bool:
method _fetch_token (line 72) | async def _fetch_token(self, oauth: McpOAuthConfig) -> _OAuthToken:
function build_oauth_tool_interceptor (line 122) | def build_oauth_tool_interceptor(extensions_config: ExtensionsConfig) ->...
function get_initial_oauth_headers (line 140) | async def get_initial_oauth_headers(extensions_config: ExtensionsConfig)...
FILE: backend/packages/harness/deerflow/mcp/tools.py
function get_mcp_tools (line 14) | async def get_mcp_tools() -> list[BaseTool]:
FILE: backend/packages/harness/deerflow/models/claude_provider.py
class ClaudeChatModel (line 31) | class ClaudeChatModel(ChatAnthropic):
method _validate_retry_config (line 52) | def _validate_retry_config(self) -> None:
method model_post_init (line 56) | def model_post_init(self, __context: Any) -> None:
method _patch_client_oauth (line 115) | def _patch_client_oauth(self, client: Any) -> None:
method _get_request_payload (line 121) | def _get_request_payload(
method _apply_prompt_caching (line 139) | def _apply_prompt_caching(self, payload: dict) -> None:
method _apply_thinking_budget (line 182) | def _apply_thinking_budget(self, payload: dict) -> None:
method _generate (line 195) | def _generate(self, messages: list[BaseMessage], stop: list[str] | Non...
method _agenerate (line 220) | async def _agenerate(self, messages: list[BaseMessage], stop: list[str...
method _calc_backoff_ms (line 248) | def _calc_backoff_ms(attempt: int, error: Exception) -> int:
FILE: backend/packages/harness/deerflow/models/credential_loader.py
function is_oauth_token (line 29) | def is_oauth_token(token: str) -> bool:
class ClaudeCodeCredential (line 35) | class ClaudeCodeCredential:
method is_expired (line 44) | def is_expired(self) -> bool:
class CodexCliCredential (line 51) | class CodexCliCredential:
function _resolve_credential_path (line 59) | def _resolve_credential_path(env_var: str, default_relative_path: str) -...
function _load_json_file (line 66) | def _load_json_file(path: Path, label: str) -> dict[str, Any] | None:
function _read_secret_from_file_descriptor (line 81) | def _read_secret_from_file_descriptor(env_var: str) -> str | None:
function _credential_from_direct_token (line 101) | def _credential_from_direct_token(access_token: str, source: str) -> Cla...
function _iter_claude_code_credential_paths (line 108) | def _iter_claude_code_credential_paths() -> list[Path]:
function _extract_claude_code_credential (line 121) | def _extract_claude_code_credential(data: dict[str, Any], source: str) -...
function load_claude_code_credential (line 142) | def load_claude_code_credential() -> ClaudeCodeCredential | None:
function load_codex_cli_credential (line 191) | def load_codex_cli_credential() -> CodexCliCredential | None:
FILE: backend/packages/harness/deerflow/models/factory.py
function create_chat_model (line 11) | def create_chat_model(name: str | None = None, thinking_enabled: bool = ...
FILE: backend/packages/harness/deerflow/models/openai_codex_provider.py
class CodexChatModel (line 33) | class CodexChatModel(BaseChatModel):
method _llm_type (line 52) | def _llm_type(self) -> str:
method _validate_retry_config (line 55) | def _validate_retry_config(self) -> None:
method model_post_init (line 59) | def model_post_init(self, __context: Any) -> None:
method _load_codex_auth (line 73) | def _load_codex_auth(self) -> CodexCliCredential | None:
method _normalize_content (line 78) | def _normalize_content(cls, content: Any) -> str:
method _convert_messages (line 105) | def _convert_messages(self, messages: list[BaseMessage]) -> tuple[str,...
method _convert_tools (line 148) | def _convert_tools(self, tools: list[dict]) -> list[dict]:
method _call_codex_api (line 173) | def _call_codex_api(self, messages: list[BaseMessage], tools: list[dic...
method _stream_response (line 216) | def _stream_response(self, headers: dict, payload: dict) -> dict:
method _parse_sse_data_line (line 234) | def _parse_sse_data_line(line: str) -> dict[str, Any] | None:
method _parse_tool_call_arguments (line 251) | def _parse_tool_call_arguments(self, output_item: dict[str, Any]) -> t...
method _parse_response (line 280) | def _parse_response(self, response: dict) -> ChatResult:
method _generate (line 342) | def _generate(
method bind_tools (line 354) | def bind_tools(self, tools: list, **kwargs: Any) -> Any:
FILE: backend/packages/harness/deerflow/models/patched_deepseek.py
class PatchedChatDeepSeek (line 17) | class PatchedChatDeepSeek(ChatDeepSeek):
method _get_request_payload (line 26) | def _get_request_payload(
FILE: backend/packages/harness/deerflow/models/patched_minimax.py
function _extract_reasoning_text (line 31) | def _extract_reasoning_text(
function _strip_inline_think_tags (line 52) | def _strip_inline_think_tags(content: str) -> tuple[str, str | None]:
function _merge_reasoning (line 66) | def _merge_reasoning(*values: str | None) -> str | None:
function _with_reasoning_content (line 77) | def _with_reasoning_content(
class PatchedChatMiniMax (line 100) | class PatchedChatMiniMax(ChatOpenAI):
method _get_request_payload (line 103) | def _get_request_payload(
method _convert_chunk_to_generation_chunk (line 121) | def _convert_chunk_to_generation_chunk(
method _create_chat_result (line 189) | def _create_chat_result(
FILE: backend/packages/harness/deerflow/reflection/resolvers.py
function _build_missing_dependency_hint (line 11) | def _build_missing_dependency_hint(module_path: str, err: ImportError) -...
function resolve_variable (line 25) | def resolve_variable[T](
function resolve_class (line 73) | def resolve_class[T](class_path: str, base_class: type[T] | None = None)...
FILE: backend/packages/harness/deerflow/sandbox/exceptions.py
class SandboxError (line 4) | class SandboxError(Exception):
method __init__ (line 7) | def __init__(self, message: str, details: dict | None = None):
method __str__ (line 12) | def __str__(self) -> str:
class SandboxNotFoundError (line 19) | class SandboxNotFoundError(SandboxError):
method __init__ (line 22) | def __init__(self, message: str = "Sandbox not found", sandbox_id: str...
class SandboxRuntimeError (line 28) | class SandboxRuntimeError(SandboxError):
class SandboxCommandError (line 34) | class SandboxCommandError(SandboxError):
method __init__ (line 37) | def __init__(self, message: str, command: str | None = None, exit_code...
class SandboxFileError (line 48) | class SandboxFileError(SandboxError):
method __init__ (line 51) | def __init__(self, message: str, path: str | None = None, operation: s...
class SandboxPermissionError (line 62) | class SandboxPermissionError(SandboxFileError):
class SandboxFileNotFoundError (line 68) | class SandboxFileNotFoundError(SandboxFileError):
FILE: backend/packages/harness/deerflow/sandbox/local/list_dir.py
function _should_ignore (line 64) | def _should_ignore(name: str) -> bool:
function list_dir (line 72) | def list_dir(path: str, max_depth: int = 2) -> list[str]:
FILE: backend/packages/harness/deerflow/sandbox/local/local_sandbox.py
class LocalSandbox (line 9) | class LocalSandbox(Sandbox):
method __init__ (line 10) | def __init__(self, id: str):
method _get_shell (line 20) | def _get_shell() -> str:
method execute_command (line 35) | def execute_command(self, command: str) -> str:
method list_dir (line 52) | def list_dir(self, path: str, max_depth=2) -> list[str]:
method read_file (line 55) | def read_file(self, path: str) -> str:
method write_file (line 59) | def write_file(self, path: str, content: str, append: bool = False) ->...
method update_file (line 67) | def update_file(self, path: str, content: bytes) -> None:
FILE: backend/packages/harness/deerflow/sandbox/local/local_sandbox_provider.py
class LocalSandboxProvider (line 8) | class LocalSandboxProvider(SandboxProvider):
method acquire (line 9) | def acquire(self, thread_id: str | None = None) -> str:
method get (line 15) | def get(self, sandbox_id: str) -> Sandbox | None:
method release (line 22) | def release(self, sandbox_id: str) -> None:
FILE: backend/packages/harness/deerflow/sandbox/middleware.py
class SandboxMiddlewareState (line 14) | class SandboxMiddlewareState(AgentState):
class SandboxMiddleware (line 21) | class SandboxMiddleware(AgentMiddleware[SandboxMiddlewareState]):
method __init__ (line 34) | def __init__(self, lazy_init: bool = True):
method _acquire_sandbox (line 45) | def _acquire_sandbox(self, thread_id: str) -> str:
method before_agent (line 52) | def before_agent(self, state: SandboxMiddlewareState, runtime: Runtime...
method after_agent (line 66) | def after_agent(self, state: SandboxMiddlewareState, runtime: Runtime)...
FILE: backend/packages/harness/deerflow/sandbox/sandbox.py
class Sandbox (line 4) | class Sandbox(ABC):
method __init__ (line 9) | def __init__(self, id: str):
method id (line 13) | def id(self) -> str:
method execute_command (line 17) | def execute_command(self, command: str) -> str:
method read_file (line 29) | def read_file(self, path: str) -> str:
method list_dir (line 41) | def list_dir(self, path: str, max_depth=2) -> list[str]:
method write_file (line 54) | def write_file(self, path: str, content: str, append: bool = False) ->...
method update_file (line 65) | def update_file(self, path: str, content: bytes) -> None:
FILE: backend/packages/harness/deerflow/sandbox/sandbox_provider.py
class SandboxProvider (line 8) | class SandboxProvider(ABC):
method acquire (line 12) | def acquire(self, thread_id: str | None = None) -> str:
method get (line 21) | def get(self, sandbox_id: str) -> Sandbox | None:
method release (line 30) | def release(self, sandbox_id: str) -> None:
function get_sandbox_provider (line 42) | def get_sandbox_provider(**kwargs) -> SandboxProvider:
function reset_sandbox_provider (line 59) | def reset_sandbox_provider() -> None:
function shutdown_sandbox_provider (line 73) | def shutdown_sandbox_provider() -> None:
function set_sandbox_provider (line 87) | def set_sandbox_provider(provider: SandboxProvider) -> None:
FILE: backend/packages/harness/deerflow/sandbox/tools.py
function _get_skills_container_path (line 30) | def _get_skills_container_path() -> str:
function _get_skills_host_path (line 50) | def _get_skills_host_path() -> str | None:
function _is_skills_path (line 75) | def _is_skills_path(path: str) -> bool:
function _resolve_skills_path (line 81) | def _resolve_skills_path(path: str) -> str:
function _path_variants (line 105) | def _path_variants(path: str) -> set[str]:
function _sanitize_error (line 109) | def _sanitize_error(error: Exception, runtime: "ToolRuntime[ContextT, Th...
function replace_virtual_path (line 123) | def replace_virtual_path(path: str, thread_data: ThreadDataState | None)...
function _thread_virtual_to_actual_mappings (line 156) | def _thread_virtual_to_actual_mappings(thread_data: ThreadDataState) -> ...
function _thread_actual_to_virtual_mappings (line 181) | def _thread_actual_to_virtual_mappings(thread_data: ThreadDataState) -> ...
function mask_local_paths_in_output (line 186) | def mask_local_paths_in_output(output: str, thread_data: ThreadDataState...
function _reject_path_traversal (line 239) | def _reject_path_traversal(path: str) -> None:
function validate_local_tool_path (line 248) | def validate_local_tool_path(path: str, thread_data: ThreadDataState | N...
function _validate_resolved_user_data_path (line 287) | def _validate_resolved_user_data_path(resolved: Path, thread_data: Threa...
function _resolve_and_validate_user_data_path (line 315) | def _resolve_and_validate_user_data_path(path: str, thread_data: ThreadD...
function validate_local_bash_command_paths (line 326) | def validate_local_bash_command_paths(command: str, thread_data: ThreadD...
function replace_virtual_paths_in_command (line 362) | def replace_virtual_paths_in_command(command: str, thread_data: ThreadDa...
function get_thread_data (line 397) | def get_thread_data(runtime: ToolRuntime[ContextT, ThreadState] | None) ...
function is_local_sandbox (line 406) | def is_local_sandbox(runtime: ToolRuntime[ContextT, ThreadState] | None)...
function sandbox_from_runtime (line 422) | def sandbox_from_runtime(runtime: ToolRuntime[ContextT, ThreadState] | N...
function ensure_sandbox_initialized (line 450) | def ensure_sandbox_initialized(runtime: ToolRuntime[ContextT, ThreadStat...
function ensure_thread_directories_exist (line 505) | def ensure_thread_directories_exist(runtime: ToolRuntime[ContextT, Threa...
function bash_tool (line 543) | def bash_tool(runtime: ToolRuntime[ContextT, ThreadState], description: ...
function ls_tool (line 574) | def ls_tool(runtime: ToolRuntime[ContextT, ThreadState], description: st...
function read_file_tool (line 607) | def read_file_tool(
function write_file_tool (line 652) | def write_file_tool(
function str_replace_tool (line 689) | def str_replace_tool(
FILE: backend/packages/harness/deerflow/skills/loader.py
function get_skills_root_path (line 8) | def get_skills_root_path() -> Path:
function load_skills (line 22) | def load_skills(skills_path: Path | None = None, use_config: bool = True...
FILE: backend/packages/harness/deerflow/skills/parser.py
function parse_skill_file (line 7) | def parse_skill_file(skill_file: Path, category: str, relative_path: Pat...
FILE: backend/packages/harness/deerflow/skills/types.py
class Skill (line 6) | class Skill:
method skill_path (line 19) | def skill_path(self) -> str:
method get_container_path (line 24) | def get_container_path(self, container_base_path: str = "/mnt/skills")...
method get_container_file_path (line 40) | def get_container_file_path(self, container_base_path: str = "/mnt/ski...
method __repr__ (line 52) | def __repr__(self) -> str:
FILE: backend/packages/harness/deerflow/skills/validation.py
function _validate_skill_frontmatter (line 15) | def _validate_skill_frontmatter(skill_dir: Path) -> tuple[bool, str, str...
FILE: backend/packages/harness/deerflow/subagents/config.py
class SubagentConfig (line 7) | class SubagentConfig:
FILE: backend/packages/harness/deerflow/subagents/executor.py
class SubagentStatus (line 26) | class SubagentStatus(Enum):
class SubagentResult (line 37) | class SubagentResult:
method __post_init__ (line 60) | def __post_init__(self):
function _filter_tools (line 78) | def _filter_tools(
function _get_model_name (line 108) | def _get_model_name(config: SubagentConfig, parent_model: str | None) ->...
class SubagentExecutor (line 123) | class SubagentExecutor:
method __init__ (line 126) | def __init__(
method _create_agent (line 164) | def _create_agent(self):
method _build_initial_state (line 182) | def _build_initial_state(self, task: str) -> dict[str, Any]:
method _aexecute (line 203) | async def _aexecute(self, task: str, result_holder: SubagentResult | N...
method execute (line 351) | def execute(self, task: str, result_holder: SubagentResult | None = No...
method execute_async (line 391) | def execute_async(self, task: str, task_id: str | None = None) -> str:
function get_background_task_result (line 459) | def get_background_task_result(task_id: str) -> SubagentResult | None:
function list_background_tasks (line 472) | def list_background_tasks() -> list[SubagentResult]:
function cleanup_background_task (line 482) | def cleanup_background_task(task_id: str) -> None:
FILE: backend/packages/harness/deerflow/subagents/registry.py
function get_subagent_config (line 12) | def get_subagent_config(name: str) -> SubagentConfig | None:
function list_subagents (line 37) | def list_subagents() -> list[SubagentConfig]:
function get_subagent_names (line 46) | def get_subagent_names() -> list[str]:
FILE: backend/packages/harness/deerflow/tools/builtins/clarification_tool.py
function ask_clarification_tool (line 7) | def ask_clarification_tool(
FILE: backend/packages/harness/deerflow/tools/builtins/present_file_tool.py
function _normalize_presented_filepath (line 15) | def _normalize_presented_filepath(
function present_file_tool (line 63) | def present_file_tool(
FILE: backend/packages/harness/deerflow/tools/builtins/setup_agent_tool.py
function setup_agent (line 15) | def setup_agent(
FILE: backend/packages/harness/deerflow/tools/builtins/task_tool.py
function task_tool (line 22) | def task_tool(
FILE: backend/packages/harness/deerflow/tools/builtins/tool_search.py
class DeferredToolEntry (line 30) | class DeferredToolEntry:
class DeferredToolRegistry (line 38) | class DeferredToolRegistry:
method __init__ (line 41) | def __init__(self):
method register (line 44) | def register(self, tool: BaseTool) -> None:
method search (line 53) | def search(self, query: str) -> list[BaseTool]:
method entries (line 96) | def entries(self) -> list[DeferredToolEntry]:
method __len__ (line 99) | def __len__(self) -> int:
function _regex_score (line 103) | def _regex_score(pattern: str, entry: DeferredToolEntry) -> int:
function get_deferred_registry (line 116) | def get_deferred_registry() -> DeferredToolRegistry | None:
function set_deferred_registry (line 120) | def set_deferred_registry(registry: DeferredToolRegistry) -> None:
function reset_deferred_registry (line 125) | def reset_deferred_registry() -> None:
function tool_search (line 135) | def tool_search(query: str) -> str:
FILE: backend/packages/harness/deerflow/tools/builtins/view_image_tool.py
function view_image_tool (line 16) | def view_image_tool(
FILE: backend/packages/harness/deerflow/tools/tools.py
function get_available_tools (line 23) | def get_available_tools(
FILE: backend/packages/harness/deerflow/utils/file_conversion.py
function convert_file_to_markdown (line 24) | async def convert_file_to_markdown(file_path: Path) -> Path | None:
FILE: backend/packages/harness/deerflow/utils/network.py
class PortAllocator (line 8) | class PortAllocator:
method __init__ (line 31) | def __init__(self):
method _is_port_available (line 35) | def _is_port_available(self, port: int) -> bool:
method allocate (line 58) | def allocate(self, start_port: int = 8080, max_range: int = 100) -> int:
method release (line 82) | def release(self, port: int) -> None:
method allocate_context (line 92) | def allocate_context(self, start_port: int = 8080, max_range: int = 100):
function get_free_port (line 113) | def get_free_port(start_port: int = 8080, max_range: int = 100) -> int:
function release_port (line 133) | def release_port(port: int) -> None:
FILE: backend/packages/harness/deerflow/utils/readability.py
class Article (line 12) | class Article:
method __init__ (line 15) | def __init__(self, title: str, html_content: str):
method to_markdown (line 19) | def to_markdown(self, including_title: bool = True) -> str:
method to_message (line 31) | def to_message(self) -> list[dict]:
class ReadabilityExtractor (line 58) | class ReadabilityExtractor:
method extract_article (line 59) | def extract_article(self, html: str) -> Article:
FILE: backend/tests/test_app_config_reload.py
function _write_config (line 12) | def _write_config(path: Path, *, model_name: str, supports_thinking: boo...
function _write_extensions_config (line 31) | def _write_extensions_config(path: Path) -> None:
function test_get_app_config_reloads_when_file_changes (line 35) | def test_get_app_config_reloads_when_file_changes(tmp_path, monkeypatch):
function test_get_app_config_reloads_when_config_path_changes (line 60) | def test_get_app_config_reloads_when_config_path_changes(tmp_path, monke...
FILE: backend/tests/test_artifacts_router.py
function test_get_artifact_reads_utf8_text_file_on_windows_locale (line 9) | def test_get_artifact_reads_utf8_text_file_on_windows_locale(tmp_path, m...
FILE: backend/tests/test_channel_file_attachments.py
function _run (line 13) | def _run(coro):
class TestResolvedAttachment (line 27) | class TestResolvedAttachment:
method test_basic_construction (line 28) | def test_basic_construction(self, tmp_path):
method test_image_detection (line 44) | def test_image_detection(self, tmp_path):
class TestOutboundMessageAttachments (line 64) | class TestOutboundMessageAttachments:
method test_default_empty_attachments (line 65) | def test_default_empty_attachments(self):
method test_attachments_populated (line 74) | def test_attachments_populated(self, tmp_path):
class TestResolveAttachments (line 102) | class TestResolveAttachments:
method test_resolves_existing_file (line 103) | def test_resolves_existing_file(self, tmp_path):
method test_resolves_image_file (line 127) | def test_resolves_image_file(self, tmp_path):
method test_skips_missing_file (line 148) | def test_skips_missing_file(self, tmp_path):
method test_skips_invalid_path (line 164) | def test_skips_invalid_path(self):
method test_rejects_uploads_path (line 176) | def test_rejects_uploads_path(self):
method test_rejects_workspace_path (line 188) | def test_rejects_workspace_path(self):
method test_rejects_path_traversal_escape (line 200) | def test_rejects_path_traversal_escape(self, tmp_path):
method test_multiple_artifacts_partial_resolution (line 221) | def test_multiple_artifacts_partial_resolution(self, tmp_path):
class _DummyChannel (line 256) | class _DummyChannel(Channel):
method __init__ (line 259) | def __init__(self, bus):
method start (line 264) | async def start(self):
method stop (line 267) | async def stop(self):
method send (line 270) | async def send(self, msg: OutboundMessage) -> None:
method send_file (line 273) | async def send_file(self, msg: OutboundMessage, attachment: ResolvedAt...
class TestBaseChannelOnOutbound (line 278) | class TestBaseChannelOnOutbound:
method test_send_file_called_for_each_attachment (line 279) | def test_send_file_called_for_each_attachment(self, tmp_path):
method test_no_attachments_no_send_file (line 307) | def test_no_attachments_no_send_file(self):
method test_send_file_failure_does_not_block_others (line 324) | def test_send_file_failure_does_not_block_others(self, tmp_path):
method test_send_raises_skips_file_uploads (line 364) | def test_send_raises_skips_file_uploads(self, tmp_path):
method test_default_send_file_returns_false (line 390) | def test_default_send_file_returns_false(self):
class TestManagerArtifactResolution (line 417) | class TestManagerArtifactResolution:
method test_handle_chat_populates_attachments (line 418) | def test_handle_chat_populates_attachments(self):
method test_format_artifact_text_for_unresolved (line 428) | def test_format_artifact_text_for_unresolved(self):
FILE: backend/tests/test_channels.py
function _run (line 19) | def _run(coro):
function _wait_for (line 28) | async def _wait_for(condition, *, timeout=5.0, interval=0.05):
class TestMessageBus (line 45) | class TestMessageBus:
method test_publish_and_get_inbound (line 46) | def test_publish_and_get_inbound(self):
method test_inbound_queue_is_fifo (line 64) | def test_inbound_queue_is_fifo(self):
method test_outbound_callback (line 76) | def test_outbound_callback(self):
method test_unsubscribe_outbound (line 92) | def test_unsubscribe_outbound(self):
method test_outbound_error_does_not_crash (line 108) | def test_outbound_error_does_not_crash(self):
method test_inbound_message_defaults (line 128) | def test_inbound_message_defaults(self):
method test_outbound_message_defaults (line 136) | def test_outbound_message_defaults(self):
class TestChannelStore (line 149) | class TestChannelStore:
method store (line 151) | def store(self, tmp_path):
method test_set_and_get_thread_id (line 154) | def test_set_and_get_thread_id(self, store):
method test_get_nonexistent_returns_none (line 158) | def test_get_nonexistent_returns_none(self, store):
method test_remove (line 161) | def test_remove(self, store):
method test_remove_nonexistent_returns_false (line 166) | def test_remove_nonexistent_returns_false(self, store):
method test_list_entries_all (line 169) | def test_list_entries_all(self, store):
method test_list_entries_filtered (line 175) | def test_list_entries_filtered(self, store):
method test_persistence (line 182) | def test_persistence(self, tmp_path):
method test_update_preserves_created_at (line 190) | def test_update_preserves_created_at(self, store):
method test_corrupt_file_handled (line 201) | def test_corrupt_file_handled(self, tmp_path):
class DummyChannel (line 213) | class DummyChannel(Channel):
method __init__ (line 216) | def __init__(self, bus, config=None):
method start (line 221) | async def start(self):
method stop (line 225) | async def stop(self):
method send (line 229) | async def send(self, msg: OutboundMessage):
class TestChannelBase (line 233) | class TestChannelBase:
method test_make_inbound (line 234) | def test_make_inbound(self):
method test_on_outbound_routes_to_channel (line 248) | def test_on_outbound_routes_to_channel(self):
method test_on_outbound_ignores_other_channels (line 260) | def test_on_outbound_ignores_other_channels(self):
class TestExtractResponseText (line 278) | class TestExtractResponseText:
method test_string_content (line 279) | def test_string_content(self):
method test_list_content_blocks (line 285) | def test_list_content_blocks(self):
method test_picks_last_ai_message (line 291) | def test_picks_last_ai_message(self):
method test_empty_messages (line 303) | def test_empty_messages(self):
method test_no_ai_messages (line 308) | def test_no_ai_messages(self):
method test_list_result (line 314) | def test_list_result(self):
method test_skips_empty_ai_content (line 320) | def test_skips_empty_ai_content(self):
method test_clarification_tool_message (line 331) | def test_clarification_tool_message(self):
method test_clarification_over_empty_ai (line 343) | def test_clarification_over_empty_ai(self):
method test_does_not_leak_previous_turn_text (line 355) | def test_does_not_leak_previous_turn_text(self):
function _make_mock_langgraph_client (line 381) | def _make_mock_langgraph_client(thread_id="test-thread-123", run_result=...
function _make_stream_part (line 404) | def _make_stream_part(event: str, data):
function _make_async_iterator (line 408) | def _make_async_iterator(items):
class TestChannelManager (line 416) | class TestChannelManager:
method test_handle_chat_creates_thread (line 417) | def test_handle_chat_creates_thread(self):
method test_handle_chat_uses_channel_session_overrides (line 461) | def test_handle_chat_uses_channel_session_overrides(self):
method test_handle_chat_uses_user_session_overrides (line 508) | def test_handle_chat_uses_user_session_overrides(self):
method test_handle_feishu_chat_streams_multiple_outbound_updates (line 567) | def test_handle_feishu_chat_streams_multiple_outbound_updates(self, mo...
method test_handle_feishu_stream_error_still_sends_final (line 635) | def test_handle_feishu_stream_error_still_sends_final(self, monkeypatch):
method test_handle_command_help (line 687) | def test_handle_command_help(self):
method test_handle_command_new (line 720) | def test_handle_command_new(self):
method test_each_topic_creates_new_thread (line 762) | def test_each_topic_creates_new_thread(self):
method test_same_topic_reuses_thread (line 814) | def test_same_topic_reuses_thread(self):
method test_none_topic_reuses_thread (line 858) | def test_none_topic_reuses_thread(self):
method test_different_topics_get_different_threads (line 902) | def test_different_topics_get_different_threads(self):
method test_handle_command_bootstrap_with_text (line 946) | def test_handle_command_bootstrap_with_text(self):
method test_handle_command_bootstrap_without_text (line 996) | def test_handle_command_bootstrap_without_text(self):
method test_handle_command_bootstrap_feishu_uses_streaming (line 1037) | def test_handle_command_bootstrap_feishu_uses_streaming(self, monkeypa...
method test_handle_command_bootstrap_creates_thread_if_needed (line 1100) | def test_handle_command_bootstrap_creates_thread_if_needed(self):
method test_help_includes_bootstrap (line 1138) | def test_help_includes_bootstrap(self):
class TestExtractArtifacts (line 1176) | class TestExtractArtifacts:
method test_extracts_from_present_files_tool_call (line 1177) | def test_extracts_from_present_files_tool_call(self):
method test_empty_when_no_present_files (line 1195) | def test_empty_when_no_present_files(self):
method test_empty_for_list_result_no_tool_calls (line 1206) | def test_empty_for_list_result_no_tool_calls(self):
method test_only_extracts_after_last_human_message (line 1212) | def test_only_extracts_after_last_human_message(self):
method test_multiple_files_in_single_call (line 1241) | def test_multiple_files_in_single_call(self):
class TestFormatArtifactText (line 1259) | class TestFormatArtifactText:
method test_single_artifact (line 1260) | def test_single_artifact(self):
method test_multiple_artifacts (line 1266) | def test_multiple_artifacts(self):
class TestHandleChatWithArtifacts (line 1275) | class TestHandleChatWithArtifacts:
method test_artifacts_appended_to_text (line 1276) | def test_artifacts_appended_to_text(self):
method test_artifacts_only_no_text (line 1322) | def test_artifacts_only_no_text(self):
method test_only_last_turn_artifacts_returned (line 1370) | def test_only_last_turn_artifacts_returned(self):
class TestFeishuChannel (line 1453) | class TestFeishuChannel:
method test_prepare_inbound_publishes_without_waiting_for_running_card (line 1454) | def test_prepare_inbound_publishes_without_waiting_for_running_card(se...
method test_prepare_inbound_and_send_share_running_card_task (line 1496) | def test_prepare_inbound_and_send_share_running_card_task(self):
method test_streaming_reuses_single_running_card (line 1555) | def test_streaming_reuses_single_running_card(self):
class TestChannelService (line 1627) | class TestChannelService:
method test_get_status_no_channels (line 1628) | def test_get_status_no_channels(self):
method test_disabled_channels_are_skipped (line 1645) | def test_disabled_channels_are_skipped(self):
method test_session_config_is_forwarded_to_manager (line 1660) | def test_session_config_is_forwarded_to_manager(self):
class TestSlackSendRetry (line 1690) | class TestSlackSendRetry:
method test_retries_on_failure_then_succeeds (line 1691) | def test_retries_on_failure_then_succeeds(self):
method test_raises_after_all_retries_exhausted (line 1717) | def test_raises_after_all_retries_exhausted(self):
class TestTelegramSendRetry (line 1742) | class TestTelegramSendRetry:
method test_retries_on_failure_then_succeeds (line 1743) | def test_retries_on_failure_then_succeeds(self):
method test_raises_after_all_retries_exhausted (line 1773) | def test_raises_after_all_retries_exhausted(self):
function _make_telegram_update (line 1800) | def _make_telegram_update(chat_type: str, message_id: int, *, reply_to_m...
class TestTelegramPrivateChatThread (line 1817) | class TestTelegramPrivateChatThread:
method test_private_chat_no_reply_uses_none_topic (line 1820) | def test_private_chat_no_reply_uses_none_topic(self):
method test_private_chat_with_reply_still_uses_none_topic (line 1836) | def test_private_chat_with_reply_still_uses_none_topic(self):
method test_group_chat_no_reply_uses_msg_id_as_topic (line 1852) | def test_group_chat_no_reply_uses_msg_id_as_topic(self):
method test_group_chat_reply_uses_reply_msg_id_as_topic (line 1868) | def test_group_chat_reply_uses_reply_msg_id_as_topic(self):
method test_supergroup_chat_uses_msg_id_as_topic (line 1884) | def test_supergroup_chat_uses_msg_id_as_topic(self):
method test_cmd_generic_private_chat_uses_none_topic (line 1900) | def test_cmd_generic_private_chat_uses_none_topic(self):
method test_cmd_generic_group_chat_uses_msg_id_as_topic (line 1917) | def test_cmd_generic_group_chat_uses_msg_id_as_topic(self):
method test_cmd_generic_group_chat_reply_uses_reply_msg_id_as_topic (line 1934) | def test_cmd_generic_group_chat_reply_uses_reply_msg_id_as_topic(self):
class TestTelegramProcessingOrder (line 1952) | class TestTelegramProcessingOrder:
method test_running_reply_sent_before_publish (line 1955) | def test_running_reply_sent_before_publish(self):
class TestSlackMarkdownConversion (line 1987) | class TestSlackMarkdownConversion:
method test_bold_converted (line 1990) | def test_bold_converted(self):
method test_link_converted (line 1997) | def test_link_converted(self):
method test_heading_converted (line 2003) | def test_heading_converted(self):
FILE: backend/tests/test_checkpointer.py
function reset_state (line 19) | def reset_state():
class TestCheckpointerConfig (line 35) | class TestCheckpointerConfig:
method test_load_memory_config (line 36) | def test_load_memory_config(self):
method test_load_sqlite_config (line 43) | def test_load_sqlite_config(self):
method test_load_postgres_config (line 50) | def test_load_postgres_config(self):
method test_default_connection_string_is_none (line 57) | def test_default_connection_string_is_none(self):
method test_set_config_to_none (line 61) | def test_set_config_to_none(self):
method test_invalid_type_raises (line 66) | def test_invalid_type_raises(self):
class TestGetCheckpointer (line 76) | class TestGetCheckpointer:
method test_returns_in_memory_saver_when_not_configured (line 77) | def test_returns_in_memory_saver_when_not_configured(self):
method test_memory_returns_in_memory_saver (line 86) | def test_memory_returns_in_memory_saver(self):
method test_memory_singleton (line 93) | def test_memory_singleton(self):
method test_reset_clears_singleton (line 99) | def test_reset_clears_singleton(self):
method test_sqlite_raises_when_package_missing (line 106) | def test_sqlite_raises_when_package_missing(self):
method test_postgres_raises_when_package_missing (line 113) | def test_postgres_raises_when_package_missing(self):
method test_postgres_raises_when_connection_string_missing (line 120) | def test_postgres_raises_when_connection_string_missing(self):
method test_sqlite_creates_saver (line 130) | def test_sqlite_creates_saver(self):
method test_postgres_creates_saver (line 153) | def test_postgres_creates_saver(self):
class TestAppConfigLoadsCheckpointer (line 182) | class TestAppConfigLoadsCheckpointer:
method test_load_checkpointer_section (line 183) | def test_load_checkpointer_section(self):
class TestClientCheckpointerFallback (line 197) | class TestClientCheckpointerFallback:
method test_client_uses_config_checkpointer_when_none_provided (line 198) | def test_client_uses_config_checkpointer_when_none_provided(self):
method test_client_explicit_checkpointer_takes_precedence (line 233) | def test_client_explicit_checkpointer_takes_precedence(self):
FILE: backend/tests/test_checkpointer_none_fix.py
class TestCheckpointerNoneFix (line 9) | class TestCheckpointerNoneFix:
method test_async_make_checkpointer_returns_in_memory_saver_when_not_configured (line 13) | async def test_async_make_checkpointer_returns_in_memory_saver_when_no...
method test_sync_checkpointer_context_returns_in_memory_saver_when_not_configured (line 36) | def test_sync_checkpointer_context_returns_in_memory_saver_when_not_co...
FILE: backend/tests/test_cli_auth_providers.py
function test_codex_provider_rejects_non_positive_retry_attempts (line 13) | def test_codex_provider_rejects_non_positive_retry_attempts():
function test_codex_provider_requires_credentials (line 18) | def test_codex_provider_requires_credentials(monkeypatch):
function test_codex_provider_concatenates_multiple_system_messages (line 25) | def test_codex_provider_concatenates_multiple_system_messages(monkeypatch):
function test_codex_provider_flattens_structured_text_blocks (line 45) | def test_codex_provider_flattens_structured_text_blocks(monkeypatch):
function test_claude_provider_rejects_non_positive_retry_attempts (line 63) | def test_claude_provider_rejects_non_positive_retry_attempts():
function test_codex_provider_skips_terminal_sse_markers (line 68) | def test_codex_provider_skips_terminal_sse_markers(monkeypatch):
function test_codex_provider_skips_non_json_sse_frames (line 81) | def test_codex_provider_skips_non_json_sse_frames(monkeypatch):
function test_codex_provider_marks_invalid_tool_call_arguments (line 93) | def test_codex_provider_marks_invalid_tool_call_arguments(monkeypatch):
function test_codex_provider_parses_valid_tool_arguments (line 126) | def test_codex_provider_parses_valid_tool_arguments(monkeypatch):
FILE: backend/tests/test_client.py
function mock_app_config (line 27) | def mock_app_config():
function client (line 42) | def client(mock_app_config):
class TestClientInit (line 53) | class TestClientInit:
method test_default_params (line 54) | def test_default_params(self, client):
method test_custom_params (line 63) | def test_custom_params(self, mock_app_config):
method test_invalid_agent_name (line 78) | def test_invalid_agent_name(self, mock_app_config):
method test_custom_config_path (line 85) | def test_custom_config_path(self, mock_app_config):
method test_checkpointer_stored (line 93) | def test_checkpointer_stored(self, mock_app_config):
class TestConfigQueries (line 105) | class TestConfigQueries:
method test_list_models (line 106) | def test_list_models(self, client):
method test_list_skills (line 116) | def test_list_skills(self, client):
method test_list_skills_enabled_only (line 138) | def test_list_skills_enabled_only(self, client):
method test_get_memory (line 143) | def test_get_memory(self, client):
function _make_agent_mock (line 156) | def _make_agent_mock(chunks: list[dict]):
function _ai_events (line 163) | def _ai_events(events):
function _tool_call_events (line 168) | def _tool_call_events(events):
function _tool_result_events (line 173) | def _tool_result_events(events):
class TestStream (line 178) | class TestStream:
method test_basic_message (line 179) | def test_basic_message(self, client):
method test_context_propagation (line 201) | def test_context_propagation(self, client):
method test_tool_call_and_result (line 218) | def test_tool_call_and_result(self, client):
method test_values_event_with_title (line 242) | def test_values_event_with_title(self, client):
method test_deduplication (line 261) | def test_deduplication(self, client):
method test_auto_thread_id (line 279) | def test_auto_thread_id(self, client):
method test_list_content_blocks (line 292) | def test_list_content_blocks(self, client):
class TestChat (line 315) | class TestChat:
method test_returns_last_message (line 316) | def test_returns_last_message(self, client):
method test_empty_response (line 334) | def test_empty_response(self, client):
class TestExtractText (line 353) | class TestExtractText:
method test_string (line 354) | def test_string(self):
method test_list_text_blocks (line 357) | def test_list_text_blocks(self):
method test_list_plain_strings (line 365) | def test_list_plain_strings(self):
method test_empty_list (line 368) | def test_empty_list(self):
method test_other_type (line 371) | def test_other_type(self):
class TestEnsureAgent (line 380) | class TestEnsureAgent:
method test_creates_agent (line 381) | def test_creates_agent(self, client):
method test_uses_default_checkpointer_when_available (line 403) | def test_uses_default_checkpointer_when_available(self, client):
method test_skips_default_checkpointer_when_unconfigured (line 420) | def test_skips_default_checkpointer_when_unconfigured(self, client):
method test_reuses_agent_same_config (line 436) | def test_reuses_agent_same_config(self, client):
class TestGetModel (line 454) | class TestGetModel:
method test_found (line 455) | def test_found(self, client):
method test_not_found (line 475) | def test_not_found(self, client):
class TestMcpConfig (line 485) | class TestMcpConfig:
method test_get_mcp_config (line 486) | def test_get_mcp_config(self, client):
method test_update_mcp_config (line 499) | def test_update_mcp_config(self, client):
class TestSkillsManagement (line 541) | class TestSkillsManagement:
method _make_skill (line 542) | def _make_skill(self, name="test-skill", enabled=True):
method test_get_skill_found (line 551) | def test_get_skill_found(self, client):
method test_get_skill_not_found (line 558) | def test_get_skill_not_found(self, client):
method test_update_skill (line 563) | def test_update_skill(self, client):
method test_update_skill_not_found (line 591) | def test_update_skill_not_found(self, client):
method test_install_skill (line 596) | def test_install_skill(self, client):
method test_install_skill_not_found (line 622) | def test_install_skill_not_found(self, client):
method test_install_skill_bad_extension (line 626) | def test_install_skill_bad_extension(self, client):
class TestMemoryManagement (line 641) | class TestMemoryManagement:
method test_reload_memory (line 642) | def test_reload_memory(self, client):
method test_get_memory_config (line 648) | def test_get_memory_config(self, client):
method test_get_memory_status (line 664) | def test_get_memory_status(self, client):
class TestUploads (line 691) | class TestUploads:
method test_upload_files (line 692) | def test_upload_files(self, client):
method test_upload_files_not_found (line 713) | def test_upload_files_not_found(self, client):
method test_upload_files_rejects_directory_path (line 717) | def test_upload_files_rejects_directory_path(self, client):
method test_upload_files_reuses_single_executor_inside_event_loop (line 722) | def test_upload_files_reuses_single_executor_inside_event_loop(self, c...
method test_list_uploads (line 774) | def test_list_uploads(self, client):
method test_delete_upload (line 791) | def test_delete_upload(self, client):
method test_delete_upload_not_found (line 803) | def test_delete_upload_not_found(self, client):
method test_delete_upload_path_traversal (line 809) | def test_delete_upload_path_traversal(self, client):
class TestArtifacts (line 822) | class TestArtifacts:
method test_get_artifact (line 823) | def test_get_artifact(self, client):
method test_get_artifact_not_found (line 839) | def test_get_artifact_not_found(self, client):
method test_get_artifact_bad_prefix (line 851) | def test_get_artifact_bad_prefix(self, client):
method test_get_artifact_path_traversal (line 855) | def test_get_artifact_path_traversal(self, client):
class TestScenarioMultiTurnConversation (line 875) | class TestScenarioMultiTurnConversation:
method test_two_turn_conversation (line 878) | def test_two_turn_conversation(self, client):
method test_stream_collects_all_event_types_across_turns (line 901) | def test_stream_collects_all_event_types_across_turns(self, client):
class TestScenarioToolChain (line 952) | class TestScenarioToolChain:
method test_multi_tool_chain (line 955) | def test_multi_tool_chain(self, client):
class TestScenarioFileLifecycle (line 1002) | class TestScenarioFileLifecycle:
method test_upload_list_delete_lifecycle (line 1005) | def test_upload_list_delete_lifecycle(self, client):
method test_upload_then_read_artifact (line 1043) | def test_upload_then_read_artifact(self, client):
class TestScenarioConfigManagement (line 1075) | class TestScenarioConfigManagement:
method test_model_and_skill_discovery (line 1078) | def test_model_and_skill_discovery(self, client):
method test_mcp_update_then_skill_toggle (line 1115) | def test_mcp_update_then_skill_toggle(self, client):
class TestScenarioAgentRecreation (line 1171) | class TestScenarioAgentRecreation:
method test_different_model_triggers_rebuild (line 1174) | def test_different_model_triggers_rebuild(self, client):
method test_same_config_reuses_agent (line 1202) | def test_same_config_reuses_agent(self, client):
method test_reset_agent_forces_rebuild (line 1226) | def test_reset_agent_forces_rebuild(self, client):
method test_per_call_override_triggers_rebuild (line 1250) | def test_per_call_override_triggers_rebuild(self, client):
class TestScenarioThreadIsolation (line 1271) | class TestScenarioThreadIsolation:
method test_uploads_isolated_per_thread (line 1274) | def test_uploads_isolated_per_thread(self, client):
method test_artifacts_isolated_per_thread (line 1298) | def test_artifacts_isolated_per_thread(self, client):
class TestScenarioMemoryWorkflow (line 1320) | class TestScenarioMemoryWorkflow:
method test_memory_full_lifecycle (line 1323) | def test_memory_full_lifecycle(self, client):
class TestScenarioSkillInstallAndUse (line 1360) | class TestScenarioSkillInstallAndUse:
method test_install_then_toggle (line 1363) | def test_install_then_toggle(self, client):
class TestScenarioEdgeCases (line 1425) | class TestScenarioEdgeCases:
method test_empty_stream_response (line 1428) | def test_empty_stream_response(self, client):
method test_chat_on_empty_response (line 1443) | def test_chat_on_empty_response(self, client):
method test_multiple_title_changes (line 1455) | def test_multiple_title_changes(self, client):
method test_concurrent_tool_calls_in_single_message (line 1478) | def test_concurrent_tool_calls_in_single_message(self, client):
method test_upload_convertible_file_conversion_failure (line 1504) | def test_upload_convertible_file_conversion_failure(self, client):
class TestGatewayConformance (line 1533) | class TestGatewayConformance:
method test_list_models (line 1541) | def test_list_models(self, mock_app_config):
method test_get_model (line 1559) | def test_get_model(self, mock_app_config):
method test_list_skills (line 1578) | def test_list_skills(self, client):
method test_get_skill (line 1593) | def test_get_skill(self, client):
method test_install_skill (line 1608) | def test_install_skill(self, client, tmp_path):
method test_get_mcp_config (line 1626) | def test_get_mcp_config(self, client):
method test_update_mcp_config (line 1647) | def test_update_mcp_config(self, client, tmp_path):
method test_upload_files (line 1676) | def test_upload_files(self, client, tmp_path):
method test_get_memory_config (line 1690) | def test_get_memory_config(self, client):
method test_get_memory_status (line 1707) | def test_get_memory_status(self, client):
FILE: backend/tests/test_client_live.py
function client (line 33) | def client():
function thread_tmp (line 39) | def thread_tmp(tmp_path):
class TestLiveBasicChat (line 52) | class TestLiveBasicChat:
method test_chat_returns_nonempty_string (line 53) | def test_chat_returns_nonempty_string(self, client):
method test_chat_follows_instruction (line 60) | def test_chat_follows_instruction(self, client):
class TestLiveStreaming (line 72) | class TestLiveStreaming:
method test_stream_yields_messages_tuple_and_end (line 73) | def test_stream_yields_messages_tuple_and_end(self, client):
method test_stream_ai_content_nonempty (line 86) | def test_stream_ai_content_nonempty(self, client):
class TestLiveToolUse (line 99) | class TestLiveToolUse:
method test_agent_uses_bash_tool (line 100) | def test_agent_uses_bash_tool(self, client):
method test_agent_uses_ls_tool (line 122) | def test_agent_uses_ls_tool(self, client):
class TestLiveMultiToolChain (line 139) | class TestLiveMultiToolChain:
method test_write_then_read (line 140) | def test_write_then_read(self, client):
class TestLiveFileUpload (line 167) | class TestLiveFileUpload:
method test_upload_list_delete (line 168) | def test_upload_list_delete(self, client, thread_tmp):
method test_upload_nonexistent_file_raises (line 209) | def test_upload_nonexistent_file_raises(self, client):
class TestLiveConfigQueries (line 219) | class TestLiveConfigQueries:
method test_list_models_returns_configured_model (line 220) | def test_list_models_returns_configured_model(self, client):
method test_get_model_found (line 232) | def test_get_model_found(self, client):
method test_get_model_not_found (line 243) | def test_get_model_not_found(self, client):
method test_list_skills (line 246) | def test_list_skills(self, client):
class TestLiveArtifact (line 261) | class TestLiveArtifact:
method test_get_artifact_after_write (line 262) | def test_get_artifact_after_write(self, client):
method test_get_artifact_not_found (line 288) | def test_get_artifact_not_found(self, client):
class TestLiveOverrides (line 298) | class TestLiveOverrides:
method test_thinking_disabled_still_works (line 299) | def test_thinking_disabled_still_works(self, client):
class TestLiveErrorResilience (line 314) | class TestLiveErrorResilience:
method test_delete_nonexistent_upload (line 315) | def test_delete_nonexistent_upload(self, client):
method test_bad_artifact_path (line 319) | def test_bad_artifact_path(self, client):
method test_path_traversal_blocked (line 323) | def test_path_traversal_blocked(self, client):
FILE: backend/tests/test_config_version.py
function _make_config_files (line 14) | def _make_config_files(tmpdir: Path, user_config: dict, example_config: ...
function test_missing_version_treated_as_zero (line 35) | def test_missing_version_treated_as_zero(caplog):
function test_matching_version_no_warning (line 53) | def test_matching_version_no_warning(caplog):
function test_outdated_version_emits_warning (line 69) | def test_outdated_version_emits_warning(caplog):
function test_no_example_file_no_warning (line 87) | def test_no_example_file_no_warning(caplog):
function test_string_config_version_does_not_raise_type_error (line 100) | def test_string_config_version_does_not_raise_type_error(caplog):
function test_newer_user_version_no_warning (line 112) | def test_newer_user_version_no_warning(caplog):
FILE: backend/tests/test_credential_loader.py
function _clear_claude_code_env (line 10) | def _clear_claude_code_env(monkeypatch) -> None:
function test_load_claude_code_credential_from_direct_env (line 20) | def test_load_claude_code_credential_from_direct_env(monkeypatch):
function test_load_claude_code_credential_from_anthropic_auth_env (line 32) | def test_load_claude_code_credential_from_anthropic_auth_env(monkeypatch):
function test_load_claude_code_credential_from_file_descriptor (line 43) | def test_load_claude_code_credential_from_file_descriptor(monkeypatch):
function test_load_claude_code_credential_from_override_path (line 62) | def test_load_claude_code_credential_from_override_path(tmp_path, monkey...
function test_load_claude_code_credential_ignores_directory_path (line 86) | def test_load_claude_code_credential_ignores_directory_path(tmp_path, mo...
function test_load_claude_code_credential_falls_back_to_default_file_when_override_is_invalid (line 95) | def test_load_claude_code_credential_falls_back_to_default_file_when_ove...
function test_load_codex_cli_credential_supports_nested_tokens_shape (line 125) | def test_load_codex_cli_credential_supports_nested_tokens_shape(tmp_path...
function test_load_codex_cli_credential_supports_legacy_top_level_shape (line 147) | def test_load_codex_cli_credential_supports_legacy_top_level_shape(tmp_p...
FILE: backend/tests/test_custom_agent.py
function _make_paths (line 17) | def _make_paths(base_dir: Path):
function _write_agent (line 24) | def _write_agent(base_dir: Path, name: str, config: dict, soul: str = "Y...
class TestPaths (line 44) | class TestPaths:
method test_agents_dir (line 45) | def test_agents_dir(self, tmp_path):
method test_agent_dir (line 49) | def test_agent_dir(self, tmp_path):
method test_agent_memory_file (line 53) | def test_agent_memory_file(self, tmp_path):
method test_user_md_file (line 57) | def test_user_md_file(self, tmp_path):
method test_paths_are_different_from_global (line 61) | def test_paths_are_different_from_global(self, tmp_path):
class TestAgentConfig (line 73) | class TestAgentConfig:
method test_minimal_config (line 74) | def test_minimal_config(self):
method test_full_config (line 83) | def test_full_config(self):
method test_config_from_dict (line 96) | def test_config_from_dict(self):
class TestLoadAgentConfig (line 111) | class TestLoadAgentConfig:
method test_load_valid_config (line 112) | def test_load_valid_config(self, tmp_path):
method test_load_missing_agent_raises (line 125) | def test_load_missing_agent_raises(self, tmp_path):
method test_load_missing_config_yaml_raises (line 132) | def test_load_missing_config_yaml_raises(self, tmp_path):
method test_load_config_infers_name_from_dir (line 142) | def test_load_config_infers_name_from_dir(self, tmp_path):
method test_load_config_with_tool_groups (line 156) | def test_load_config_with_tool_groups(self, tmp_path):
method test_legacy_prompt_file_field_ignored (line 167) | def test_legacy_prompt_file_field_ignored(self, tmp_path):
class TestLoadAgentSoul (line 187) | class TestLoadAgentSoul:
method test_reads_soul_file (line 188) | def test_reads_soul_file(self, tmp_path):
method test_missing_soul_file_returns_none (line 200) | def test_missing_soul_file_returns_none(self, tmp_path):
method test_empty_soul_file_returns_none (line 214) | def test_empty_soul_file_returns_none(self, tmp_path):
class TestListCustomAgents (line 234) | class TestListCustomAgents:
method test_empty_when_no_agents_dir (line 235) | def test_empty_when_no_agents_dir(self, tmp_path):
method test_discovers_multiple_agents (line 243) | def test_discovers_multiple_agents(self, tmp_path):
method test_skips_dirs_without_config_yaml (line 256) | def test_skips_dirs_without_config_yaml(self, tmp_path):
method test_skips_non_directory_entries (line 270) | def test_skips_non_directory_entries(self, tmp_path):
method test_returns_sorted_by_name (line 285) | def test_returns_sorted_by_name(self, tmp_path):
class TestMemoryFilePath (line 304) | class TestMemoryFilePath:
method test_global_memory_path (line 305) | def test_global_memory_path(self, tmp_path):
method test_agent_memory_path (line 317) | def test_agent_memory_path(self, tmp_path):
method test_different_paths_for_different_agents (line 329) | def test_different_paths_for_different_agents(self, tmp_path):
function _make_test_app (line 351) | def _make_test_app(tmp_path: Path):
function agent_client (line 363) | def agent_client(tmp_path):
class TestAgentsAPI (line 374) | class TestAgentsAPI:
method test_list_agents_empty (line 375) | def test_list_agents_empty(self, agent_client):
method test_create_agent (line 381) | def test_create_agent(self, agent_client):
method test_create_agent_invalid_name (line 394) | def test_create_agent_invalid_name(self, agent_client):
method test_create_duplicate_agent_409 (line 399) | def test_create_duplicate_agent_409(self, agent_client):
method test_list_agents_after_create (line 407) | def test_list_agents_after_create(self, agent_client):
method test_get_agent (line 417) | def test_get_agent(self, agent_client):
method test_get_missing_agent_404 (line 426) | def test_get_missing_agent_404(self, agent_client):
method test_update_agent_soul (line 430) | def test_update_agent_soul(self, agent_client):
method test_update_agent_description (line 437) | def test_update_agent_description(self, agent_client):
method test_update_missing_agent_404 (line 444) | def test_update_missing_agent_404(self, agent_client):
method test_delete_agent (line 448) | def test_delete_agent(self, agent_client):
method test_delete_missing_agent_404 (line 458) | def test_delete_missing_agent_404(self, agent_client):
method test_create_agent_with_model_and_tool_groups (line 462) | def test_create_agent_with_model_and_tool_groups(self, agent_client):
method test_create_persists_files_on_disk (line 476) | def test_create_persists_files_on_disk(self, agent_client, tmp_path):
method test_delete_removes_files_from_disk (line 485) | def test_delete_removes_files_from_disk(self, agent_client, tmp_path):
class TestUserProfileAPI (line 499) | class TestUserProfileAPI:
method test_get_user_profile_empty (line 500) | def test_get_user_profile_empty(self, agent_client):
method test_put_user_profile (line 505) | def test_put_user_profile(self, agent_client, tmp_path):
method test_get_user_profile_after_put (line 516) | def test_get_user_profile_after_put(self, agent_client):
method test_put_empty_user_profile_returns_none (line 524) | def test_put_empty_user_profile_returns_none(self, agent_client):
FILE: backend/tests/test_docker_sandbox_mode_detection.py
function _detect_mode_with_config (line 13) | def _detect_mode_with_config(config_content: str) -> str:
function test_detect_mode_defaults_to_local_when_config_missing (line 29) | def test_detect_mode_defaults_to_local_when_config_missing():
function test_detect_mode_local_provider (line 38) | def test_detect_mode_local_provider():
function test_detect_mode_aio_without_provisioner_url (line 48) | def test_detect_mode_aio_without_provisioner_url():
function test_detect_mode_provisioner_with_url (line 58) | def test_detect_mode_provisioner_with_url():
function test_detect_mode_ignores_commented_provisioner_url (line 69) | def test_detect_mode_ignores_commented_provisioner_url():
function test_detect_mode_unknown_provider_falls_back_to_local (line 80) | def test_detect_mode_unknown_provider_falls_back_to_local():
FILE: backend/tests/test_feishu_parser.py
function test_feishu_on_message_plain_text (line 10) | def test_feishu_on_message_plain_text():
function test_feishu_on_message_rich_text (line 40) | def test_feishu_on_message_rich_text():
FILE: backend/tests/test_harness_boundary.py
function _collect_imports (line 18) | def _collect_imports(filepath: Path) -> list[tuple[int, str]]:
function test_harness_does_not_import_app (line 37) | def test_harness_does_not_import_app():
FILE: backend/tests/test_infoquest_client.py
class TestInfoQuestClient (line 10) | class TestInfoQuestClient:
method test_infoquest_client_initialization (line 11) | def test_infoquest_client_initialization(self):
method test_fetch_success (line 28) | def test_fetch_success(self, mock_post):
method test_fetch_non_200_status (line 46) | def test_fetch_non_200_status(self, mock_post):
method test_fetch_empty_response (line 59) | def test_fetch_empty_response(self, mock_post):
method test_web_search_raw_results_success (line 72) | def test_web_search_raw_results_success(self, mock_post):
method test_web_search_success (line 89) | def test_web_search_success(self, mock_post):
method test_clean_results (line 105) | def test_clean_results(self):
method test_web_search_tool (line 127) | def test_web_search_tool(self, mock_get_client):
method test_web_fetch_tool (line 140) | def test_web_fetch_tool(self, mock_get_client):
method test_get_infoquest_client (line 153) | def test_get_infoquest_client(self, mock_get_app_config):
method test_web_search_api_error (line 174) | def test_web_search_api_error(self, mock_post):
method test_clean_results_with_image_search (line 183) | def test_clean_results_with_image_search(self):
method test_clean_results_with_image_search_empty (line 192) | def test_clean_results_with_image_search_empty(self):
method test_clean_results_with_image_search_no_images (line 199) | def test_clean_results_with_image_search_no_images(self):
class TestImageSearch (line 207) | class TestImageSearch:
method test_image_search_raw_results_success (line 209) | def test_image_search_raw_results_success(self, mock_post):
method test_image_search_raw_results_with_parameters (line 226) | def test_image_search_raw_results_with_parameters(self, mock_post):
method test_image_search_raw_results_invalid_time_range (line 245) | def test_image_search_raw_results_invalid_time_range(self, mock_post):
method test_image_search_success (line 267) | def test_image_search_success(self, mock_post):
method test_image_search_with_all_parameters (line 288) | def test_image_search_with_all_parameters(self, mock_post):
method test_image_search_api_error (line 308) | def test_image_search_api_error(self, mock_post):
method test_image_search_tool (line 318) | def test_image_search_tool(self, mock_get_client):
method test_image_search_tool_with_parameters (line 336) | def test_image_search_tool_with_parameters(self, mock_get_client):
FILE: backend/tests/test_lead_agent_model_resolution.py
function _make_app_config (line 13) | def _make_app_config(models: list[ModelConfig]) -> AppConfig:
function _make_model (line 20) | def _make_model(name: str, *, supports_thinking: bool) -> ModelConfig:
function test_resolve_model_name_falls_back_to_default (line 32) | def test_resolve_model_name_falls_back_to_default(monkeypatch, caplog):
function test_resolve_model_name_uses_default_when_none (line 49) | def test_resolve_model_name_uses_default_when_none(monkeypatch):
function test_resolve_model_name_raises_when_no_models_configured (line 64) | def test_resolve_model_name_raises_when_no_models_configured(monkeypatch):
function test_make_lead_agent_disables_thinking_when_model_does_not_support_it (line 76) | def test_make_lead_agent_disables_thinking_when_model_does_not_support_i...
function test_build_middlewares_uses_resolved_model_name_for_vision (line 112) | def test_build_middlewares_uses_resolved_model_name_for_vision(monkeypat...
FILE: backend/tests/test_local_sandbox_encoding.py
function _open (line 7) | def _open(base, file, mode="r", *args, **kwargs):
function test_read_file_uses_utf8_on_windows_locale (line 13) | def test_read_file_uses_utf8_on_windows_locale(tmp_path, monkeypatch):
function test_write_file_uses_utf8_on_windows_locale (line 24) | def test_write_file_uses_utf8_on_windows_locale(tmp_path, monkeypatch):
FILE: backend/tests/test_loop_detection_middleware.py
function _make_runtime (line 14) | def _make_runtime(thread_id="test-thread"):
function _make_state (line 21) | def _make_state(tool_calls=None, content=""):
function _bash_call (line 27) | def _bash_call(cmd="ls"):
class TestHashToolCalls (line 31) | class TestHashToolCalls:
method test_same_calls_same_hash (line 32) | def test_same_calls_same_hash(self):
method test_different_calls_different_hash (line 37) | def test_different_calls_different_hash(self):
method test_order_independent (line 42) | def test_order_independent(self):
method test_empty_calls (line 47) | def test_empty_calls(self):
class TestLoopDetection (line 53) | class TestLoopDetection:
method test_no_tool_calls_returns_none (line 54) | def test_no_tool_calls_returns_none(self):
method test_below_threshold_returns_none (line 61) | def test_below_threshold_returns_none(self):
method test_warn_at_threshold (line 71) | def test_warn_at_threshold(self):
method test_warn_only_injected_once (line 87) | def test_warn_only_injected_once(self):
method test_hard_stop_at_limit (line 106) | def test_hard_stop_at_limit(self):
method test_different_calls_dont_trigger (line 124) | def test_different_calls_dont_trigger(self):
method test_window_sliding (line 133) | def test_window_sliding(self):
method test_reset_clears_state (line 150) | def test_reset_clears_state(self):
method test_non_ai_message_ignored (line 163) | def test_non_ai_message_ignored(self):
method test_empty_messages_ignored (line 170) | def test_empty_messages_ignored(self):
method test_thread_id_from_runtime_context (line 176) | def test_thread_id_from_runtime_context(self):
method test_lru_eviction (line 198) | def test_lru_eviction(self):
method test_thread_safe_mutations (line 216) | def test_thread_safe_mutations(self):
method test_fallback_thread_id_when_missing (line 223) | def test_fallback_thread_id_when_missing(self):
FILE: backend/tests/test_mcp_client_config.py
function test_build_server_params_stdio_success (line 9) | def test_build_server_params_stdio_success():
function test_build_server_params_stdio_requires_command (line 27) | def test_build_server_params_stdio_requires_command():
function test_build_server_params_http_like_success (line 35) | def test_build_server_params_http_like_success(transport: str):
function test_build_server_params_http_like_requires_url (line 52) | def test_build_server_params_http_like_requires_url(transport: str):
function test_build_server_params_rejects_unsupported_transport (line 59) | def test_build_server_params_rejects_unsupported_transport():
function test_build_servers_config_returns_empty_when_no_enabled_servers (line 66) | def test_build_servers_config_returns_empty_when_no_enabled_servers():
function test_build_servers_config_skips_invalid_server_and_keeps_valid_ones (line 78) | def test_build_servers_config_skips_invalid_server_and_keeps_valid_ones():
FILE: backend/tests/test_mcp_oauth.py
class _MockResponse (line 12) | class _MockResponse:
method __init__ (line 13) | def __init__(self, payload: dict[str, Any]):
method raise_for_status (line 16) | def raise_for_status(self) -> None:
method json (line 19) | def json(self) -> dict[str, Any]:
class _MockAsyncClient (line 23) | class _MockAsyncClient:
method __init__ (line 24) | def __init__(self, payload: dict[str, Any], post_calls: list[dict[str,...
method __aenter__ (line 28) | async def __aenter__(self):
method __aexit__ (line 31) | async def __aexit__(self, exc_type, exc, tb):
method post (line 34) | async def post(self, url: str, data: dict[str, Any]):
function test_oauth_token_manager_fetches_and_caches_token (line 39) | def test_oauth_token_manager_fetches_and_caches_token(monkeypatch):
function test_build_oauth_interceptor_injects_authorization_header (line 86) | def test_build_oauth_interceptor_injects_authorization_header(monkeypatch):
function test_get_initial_oauth_headers (line 148) | def test_get_initial_oauth_headers(monkeypatch):
FILE: backend/tests/test_memory_prompt_injection.py
function test_format_memory_includes_facts_section (line 8) | def test_format_memory_includes_facts_section() -> None:
function test_format_memory_sorts_facts_by_confidence_desc (line 25) | def test_format_memory_sorts_facts_by_confidence_desc() -> None:
function test_format_memory_respects_budget_when_adding_facts (line 40) | def test_format_memory_respects_budget_when_adding_facts(monkeypatch) ->...
function test_coerce_confidence_nan_falls_back_to_default (line 71) | def test_coerce_confidence_nan_falls_back_to_default() -> None:
function test_coerce_confidence_inf_falls_back_to_default (line 77) | def test_coerce_confidence_inf_falls_back_to_default() -> None:
function test_coerce_confidence_valid_values_are_clamped (line 83) | def test_coerce_confidence_valid_values_are_clamped() -> None:
function test_format_memory_skips_none_content_facts (line 90) | def test_format_memory_skips_none_content_facts() -> None:
function test_format_memory_skips_non_string_content_facts (line 105) | def test_format_memory_skips_non_string_content_facts() -> None:
FILE: backend/tests/test_memory_updater.py
function _make_memory (line 8) | def _make_memory(facts: list[dict[str, object]] | None = None) -> dict[s...
function _memory_config (line 26) | def _memory_config(**overrides: object) -> MemoryConfig:
function test_apply_updates_skips_existing_duplicate_and_preserves_removals (line 33) | def test_apply_updates_skips_existing_duplicate_and_preserves_removals()...
function test_apply_updates_skips_same_batch_duplicates_and_keeps_source_metadata (line 72) | def test_apply_updates_skips_same_batch_duplicates_and_keeps_source_meta...
function test_apply_updates_preserves_threshold_and_max_facts_trimming (line 97) | def test_apply_updates_preserves_threshold_and_max_facts_trimming() -> N...
class TestExtractText (line 146) | class TestExtractText:
method test_string_passthrough (line 149) | def test_string_passthrough(self):
method test_list_single_text_block (line 152) | def test_list_single_text_block(self):
method test_list_multiple_text_blocks_joined (line 155) | def test_list_multiple_text_blocks_joined(self):
method test_list_plain_strings (line 162) | def test_list_plain_strings(self):
method test_list_string_chunks_join_without_separator (line 165) | def test_list_string_chunks_join_without_separator(self):
method test_list_mixed_strings_and_blocks (line 169) | def test_list_mixed_strings_and_blocks(self):
method test_list_adjacent_string_chunks_then_block (line 176) | def test_list_adjacent_string_chunks_then_block(self):
method test_list_skips_non_text_blocks (line 184) | def test_list_skips_non_text_blocks(self):
method test_empty_list (line 191) | def test_empty_list(self):
method test_list_no_text_blocks (line 194) | def test_list_no_text_blocks(self):
method test_non_str_non_list (line 197) | def test_non_str_non_list(self):
class TestFormatConversationForUpdate (line 206) | class TestFormatConversationForUpdate:
method test_plain_string_messages (line 207) | def test_plain_string_messages(self):
method test_list_content_with_plain_strings (line 220) | def test_list_content_with_plain_strings(self):
class TestUpdateMemoryStructuredResponse (line 236) | class TestUpdateMemoryStructuredResponse:
method _make_mock_model (line 239) | def _make_mock_model(self, content):
method test_string_response_parses (line 246) | def test_string_response_parses(self):
method test_list_content_response_parses (line 267) | def test_list_content_response_parses(self):
FILE: backend/tests/test_memory_upload_filtering.py
function _human (line 22) | def _human(text: str) -> HumanMessage:
function _ai (line 26) | def _ai(text: str, tool_calls=None) -> AIMessage:
class TestFilterMessagesForMemory (line 38) | class TestFilterMessagesForMemory:
method test_upload_only_turn_is_excluded (line 41) | def test_upload_only_turn_is_excluded(self):
method test_upload_with_real_question_preserves_question (line 51) | def test_upload_with_real_question_preserves_question(self):
method test_plain_conversation_passes_through (line 69) | def test_plain_conversation_passes_through(self):
method test_tool_messages_are_excluded (line 79) | def test_tool_messages_are_excluded(self):
method test_multi_turn_with_upload_in_middle (line 94) | def test_multi_turn_with_upload_in_middle(self):
method test_multimodal_content_list_handled (line 116) | def test_multimodal_content_list_handled(self):
method test_file_path_not_in_filtered_content (line 127) | def test_file_path_not_in_filtered_content(self):
class TestStripUploadMentionsFromMemory (line 142) | class TestStripUploadMentionsFromMemory:
method _make_memory (line 143) | def _make_memory(self, summary: str, facts: list[dict] | None = None) ...
method test_upload_event_sentence_removed_from_summary (line 152) | def test_upload_event_sentence_removed_from_summary(self):
method test_upload_path_sentence_removed_from_summary (line 160) | def test_upload_path_sentence_removed_from_summary(self):
method test_legitimate_csv_mention_is_preserved (line 167) | def test_legitimate_csv_mention_is_preserved(self):
method test_pdf_export_preference_preserved (line 173) | def test_pdf_export_preference_preserved(self):
method test_uploading_a_test_file_removed (line 179) | def test_uploading_a_test_file_removed(self):
method test_upload_fact_removed_from_facts (line 189) | def test_upload_fact_removed_from_facts(self):
method test_non_upload_facts_preserved (line 202) | def test_non_upload_facts_preserved(self):
method test_empty_memory_handled_gracefully (line 211) | def test_empty_memory_handled_gracefully(self):
FILE: backend/tests/test_model_config.py
function _make_model (line 4) | def _make_model(**overrides) -> ModelConfig:
function test_responses_api_fields_are_declared_in_model_schema (line 15) | def test_responses_api_fields_are_declared_in_model_schema():
function test_responses_api_fields_round_trip_in_model_dump (line 20) | def test_responses_api_fields_round_trip_in_model_dump():
FILE: backend/tests/test_model_factory.py
function _make_app_config (line 19) | def _make_app_config(models: list[ModelConfig]) -> AppConfig:
function _make_model (line 26) | def _make_model(
class FakeChatModel (line 51) | class FakeChatModel(BaseChatModel):
method __init__ (line 56) | def __init__(self, **kwargs):
method _llm_type (line 62) | def _llm_type(self) -> str:
method _generate (line 65) | def _generate(self, *args, **kwargs): # type: ignore[override]
method _stream (line 68) | def _stream(self, *args, **kwargs): # type: ignore[override]
function _patch_factory (line 72) | def _patch_factory(monkeypatch, app_config: AppConfig, model_class=FakeC...
function test_uses_first_model_when_name_is_none (line 84) | def test_uses_first_model_when_name_is_none(monkeypatch):
function test_raises_when_model_not_found (line 95) | def test_raises_when_model_not_found(monkeypatch):
function test_thinking_enabled_raises_when_not_supported_but_when_thinking_enabled_is_set (line 109) | def test_thinking_enabled_raises_when_not_supported_but_when_thinking_en...
function test_thinking_enabled_raises_for_empty_when_thinking_enabled_explicitly_set (line 120) | def test_thinking_enabled_raises_for_empty_when_thinking_enabled_explici...
function test_thinking_enabled_merges_when_thinking_enabled_settings (line 131) | def test_thinking_enabled_merges_when_thinking_enabled_settings(monkeypa...
function test_thinking_disabled_openai_gateway_format (line 148) | def test_thinking_disabled_openai_gateway_format(monkeypatch):
function test_thinking_disabled_langchain_anthropic_format (line 180) | def test_thinking_disabled_langchain_anthropic_format(monkeypatch):
function test_thinking_disabled_no_when_thinking_enabled_does_nothing (line 214) | def test_thinking_disabled_no_when_thinking_enabled_does_nothing(monkeyp...
function test_reasoning_effort_cleared_when_not_supported (line 241) | def test_reasoning_effort_cleared_when_not_supported(monkeypatch):
function test_reasoning_effort_preserved_when_supported (line 259) | def test_reasoning_effort_preserved_when_supported(monkeypatch):
function test_thinking_shortcut_enables_thinking_when_thinking_enabled (line 294) | def test_thinking_shortcut_enables_thinking_when_thinking_enabled(monkey...
function test_thinking_shortcut_disables_thinking_when_thinking_disabled (line 323) | def test_thinking_shortcut_disables_thinking_when_thinking_disabled(monk...
function test_thinking_shortcut_merges_with_when_thinking_enabled (line 354) | def test_thinking_shortcut_merges_with_when_thinking_enabled(monkeypatch):
function test_thinking_shortcut_not_leaked_into_model_when_disabled (line 387) | def test_thinking_shortcut_not_leaked_into_model_when_disabled(monkeypat...
function test_openai_compatible_provider_passes_base_url (line 423) | def test_openai_compatible_provider_passes_base_url(monkeypatch):
function test_openai_compatible_provider_multiple_models (line 459) | def test_openai_compatible_provider_multiple_models(monkeypatch):
class FakeCodexChatModel (line 511) | class FakeCodexChatModel(FakeChatModel):
function test_codex_provider_disables_reasoning_when_thinking_disabled (line 515) | def test_codex_provider_disables_reasoning_when_thinking_disabled(monkey...
function test_codex_provider_preserves_explicit_reasoning_effort (line 535) | def test_codex_provider_preserves_explicit_reasoning_effort(monkeypatch):
function test_codex_provider_defaults_reasoning_effort_to_medium (line 555) | def test_codex_provider_defaults_reasoning_effort_to_medium(monkeypatch):
function test_codex_provider_strips_unsupported_max_tokens (line 575) | def test_codex_provider_strips_unsupported_max_tokens(monkeypatch):
function test_openai_responses_api_settings_are_passed_to_chatopenai (line 596) | def test_openai_responses_api_settings_are_passed_to_chatopenai(monkeypa...
FILE: backend/tests/test_patched_minimax.py
function _make_model (line 6) | def _make_model(**kwargs) -> PatchedChatMiniMax:
function test_get_request_payload_preserves_thinking_and_forces_reasoning_split (line 15) | def test_get_request_payload_preserves_thinking_and_forces_reasoning_spl...
function test_create_chat_result_maps_reasoning_details_to_reasoning_content (line 24) | def test_create_chat_result_maps_reasoning_details_to_reasoning_content():
function test_create_chat_result_strips_inline_think_tags (line 56) | def test_create_chat_result_strips_inline_think_tags():
function test_convert_chunk_to_generation_chunk_preserves_reasoning_deltas (line 79) | def test_convert_chunk_to_generation_chunk_preserves_reasoning_deltas():
FILE: backend/tests/test_present_file_tool_core_logic.py
function _make_runtime (line 9) | def _make_runtime(outputs_path: str) -> SimpleNamespace:
function test_present_files_normalizes_host_outputs_path (line 16) | def test_present_files_normalizes_host_outputs_path(tmp_path):
function test_present_files_keeps_virtual_outputs_path (line 32) | def test_present_files_keeps_virtual_outputs_path(tmp_path, monkeypatch):
function test_present_files_rejects_paths_outside_outputs (line 53) | def test_present_files_rejects_paths_outside_outputs(tmp_path):
FILE: backend/tests/test_provisioner_kubeconfig.py
function _load_provisioner_module (line 9) | def _load_provisioner_module():
function test_wait_for_kubeconfig_rejects_directory (line 21) | def test_wait_for_kubeconfig_rejects_directory(tmp_path):
function test_wait_for_kubeconfig_accepts_file (line 36) | def test_wait_for_kubeconfig_accepts_file(tmp_path):
function test_init_k8s_client_rejects_directory_path (line 48) | def test_init_k8s_client_rejects_directory_path(tmp_path):
function test_init_k8s_client_uses_file_kubeconfig (line 63) | def test_init_k8s_client_uses_file_kubeconfig(tmp_path, monkeypatch):
function test_init_k8s_client_falls_back_to_incluster_when_missing (line 93) | def test_init_k8s_client_falls_back_to_incluster_when_missing(tmp_path, ...
FILE: backend/tests/test_readability.py
function test_extract_article_falls_back_when_readability_js_fails (line 10) | def test_extract_article_falls_back_when_readability_js_fails(monkeypatch):
function test_extract_article_re_raises_unexpected_exception (line 37) | def test_extract_article_re_raises_unexpected_exception(monkeypatch):
FILE: backend/tests/test_reflection_resolvers.py
function test_resolve_variable_reports_install_hint_for_missing_google_provider (line 9) | def test_resolve_variable_reports_install_hint_for_missing_google_provid...
function test_resolve_variable_reports_install_hint_for_missing_google_transitive_dependency (line 25) | def test_resolve_variable_reports_install_hint_for_missing_google_transi...
function test_resolve_variable_invalid_path_format (line 44) | def test_resolve_variable_invalid_path_format():
FILE: backend/tests/test_sandbox_tools_security.py
function test_replace_virtual_path_maps_virtual_root_and_subpaths (line 29) | def test_replace_virtual_path_maps_virtual_root_and_subpaths() -> None:
function test_mask_local_paths_in_output_hides_host_paths (line 40) | def test_mask_local_paths_in_output_hides_host_paths() -> None:
function test_mask_local_paths_in_output_hides_skills_host_paths (line 48) | def test_mask_local_paths_in_output_hides_skills_host_paths() -> None:
function test_reject_path_traversal_blocks_dotdot (line 64) | def test_reject_path_traversal_blocks_dotdot() -> None:
function test_reject_path_traversal_blocks_dotdot_at_start (line 69) | def test_reject_path_traversal_blocks_dotdot_at_start() -> None:
function test_reject_path_traversal_blocks_backslash_dotdot (line 74) | def test_reject_path_traversal_blocks_backslash_dotdot() -> None:
function test_reject_path_traversal_allows_normal_paths (line 79) | def test_reject_path_traversal_allows_normal_paths() -> None:
function test_validate_local_tool_path_rejects_non_virtual_path (line 89) | def test_validate_local_tool_path_rejects_non_virtual_path() -> None:
function test_validate_local_tool_path_rejects_bare_virtual_root (line 94) | def test_validate_local_tool_path_rejects_bare_virtual_root() -> None:
function test_validate_local_tool_path_allows_user_data_paths (line 100) | def test_validate_local_tool_path_allows_user_data_paths() -> None:
function test_validate_local_tool_path_allows_user_data_write (line 107) | def test_validate_local_tool_path_allows_user_data_write() -> None:
function test_validate_local_tool_path_rejects_traversal_in_user_data (line 112) | def test_validate_local_tool_path_rejects_traversal_in_user_data() -> None:
function test_validate_local_tool_path_rejects_traversal_in_skills (line 118) | def test_validate_local_tool_path_rejects_traversal_in_skills() -> None:
function test_validate_local_tool_path_rejects_none_thread_data (line 125) | def test_validate_local_tool_path_rejects_none_thread_data() -> None:
function test_resolve_skills_path_resolves_correctly (line 136) | def test_resolve_skills_path_resolves_correctly() -> None:
function test_resolve_skills_path_resolves_root (line 146) | def test_resolve_skills_path_resolves_root() -> None:
function test_resolve_skills_path_raises_when_not_configured (line 156) | def test_resolve_skills_path_raises_when_not_configured() -> None:
function test_resolve_and_validate_user_data_path_resolves_correctly (line 169) | def test_resolve_and_validate_user_data_path_resolves_correctly(tmp_path...
function test_resolve_and_validate_user_data_path_blocks_traversal (line 182) | def test_resolve_and_validate_user_data_path_blocks_traversal(tmp_path: ...
function test_replace_virtual_paths_in_command_replaces_skills_paths (line 199) | def test_replace_virtual_paths_in_command_replaces_skills_paths() -> None:
function test_replace_virtual_paths_in_command_replaces_both (line 211) | def test_replace_virtual_paths_in_command_replaces_both() -> None:
function test_validate_local_bash_command_paths_blocks_host_paths (line 228) | def test_validate_local_bash_command_paths_blocks_host_paths() -> None:
function test_validate_local_bash_command_paths_allows_virtual_and_system_paths (line 233) | def test_validate_local_bash_command_paths_allows_virtual_and_system_pat...
function test_validate_local_bash_command_paths_blocks_traversal_in_user_data (line 240) | def test_validate_local_bash_command_paths_blocks_traversal_in_user_data...
function test_validate_local_bash_command_paths_blocks_traversal_in_skills (line 249) | def test_validate_local_bash_command_paths_blocks_traversal_in_skills() ...
function test_is_skills_path_recognises_default_prefix (line 262) | def test_is_skills_path_recognises_default_prefix() -> None:
function test_validate_local_tool_path_allows_skills_read_only (line 270) | def test_validate_local_tool_path_allows_skills_read_only() -> None:
function test_validate_local_tool_path_blocks_skills_write (line 281) | def test_validate_local_tool_path_blocks_skills_write() -> None:
function test_validate_local_bash_command_paths_allows_skills_path (line 292) | def test_validate_local_bash_command_paths_allows_skills_path() -> None:
function test_validate_local_bash_command_paths_still_blocks_other_paths (line 301) | def test_validate_local_bash_command_paths_still_blocks_other_paths() ->...
function test_validate_local_tool_path_skills_custom_container_path (line 308) | def test_validate_local_tool_path_skills_custom_container_path() -> None:
FILE: backend/tests/test_serialize_message_content.py
class TestSerializeToolMessageContent (line 18) | class TestSerializeToolMessageContent:
method test_string_content (line 21) | def test_string_content(self):
method test_list_of_blocks_content (line 27) | def test_list_of_blocks_content(self):
method test_multiple_text_blocks (line 40) | def test_multiple_text_blocks(self):
method test_string_chunks_are_joined_without_newlines (line 53) | def test_string_chunks_are_joined_without_newlines(self):
method test_mixed_string_chunks_and_blocks (line 63) | def test_mixed_string_chunks_and_blocks(self):
method test_mixed_blocks_with_non_text (line 73) | def test_mixed_blocks_with_non_text(self):
method test_empty_list_content (line 86) | def test_empty_list_content(self):
method test_plain_string_in_list (line 91) | def test_plain_string_in_list(self):
method test_unknown_content_type_falls_back (line 101) | def test_unknown_content_type_falls_back(self):
class TestExtractText (line 114) | class TestExtractText:
method test_string_passthrough (line 117) | def test_string_passthrough(self):
method test_list_text_blocks (line 120) | def test_list_text_blocks(self):
method test_empty_list (line 125) | def test_empty_list(self):
method test_fallback_non_iterable (line 128) | def test_fallback_non_iterable(self):
FILE: backend/tests/test_skills_archive_root.py
function _write_skill (line 8) | def _write_skill(skill_dir: Path) -> None:
function test_resolve_skill_dir_ignores_macosx_wrapper (line 22) | def test_resolve_skill_dir_ignores_macosx_wrapper(tmp_path: Path) -> None:
function test_resolve_skill_dir_ignores_hidden_top_level_entries (line 29) | def test_resolve_skill_dir_ignores_hidden_top_level_entries(tmp_path: Pa...
function test_resolve_skill_dir_rejects_archive_with_only_metadata (line 36) | def test_resolve_skill_dir_rejects_archive_with_only_metadata(tmp_path: ...
FILE: backend/tests/test_skills_loader.py
function _write_skill (line 8) | def _write_skill(skill_dir: Path, name: str, description: str) -> None:
function test_get_skills_root_path_points_to_project_root_skills (line 15) | def test_get_skills_root_path_points_to_project_root_skills():
function test_load_skills_discovers_nested_skills_and_sets_container_paths (line 24) | def test_load_skills_discovers_nested_skills_and_sets_container_paths(tm...
function test_load_skills_skips_hidden_directories (line 51) | def test_load_skills_skips_hidden_directories(tmp_path: Path):
FILE: backend/tests/test_skills_router.py
function _write_skill (line 13) | def _write_skill(skill_dir: Path, frontmatter: str) -> None:
function test_validate_skill_frontmatter_allows_standard_optional_metadata (line 18) | def test_validate_skill_frontmatter_allows_standard_optional_metadata(tm...
function test_validate_skill_frontmatter_still_rejects_unknown_keys (line 42) | def test_validate_skill_frontmatter_still_rejects_unknown_keys(tmp_path:...
function test_validate_skill_frontmatter_reads_utf8_on_windows_locale (line 63) | def test_validate_skill_frontmatter_reads_utf8_on_windows_locale(tmp_pat...
FILE: backend/tests/test_subagent_executor.py
function _setup_executor_classes (line 35) | def _setup_executor_classes():
class MockHumanMessage (line 90) | class MockHumanMessage:
method __init__ (line 93) | def __init__(self, content, _classes=None):
method _get_real (line 97) | def _get_real(self):
class MockAIMessage (line 101) | class MockAIMessage:
method __init__ (line 104) | def __init__(self, content, msg_id=None, _classes=None):
method _get_real (line 109) | def _get_real(self):
function async_iterator (line 116) | async def async_iterator(items):
function classes (line 128) | def classes(_setup_executor_classes):
function base_config (line 134) | def base_config(classes):
function mock_agent (line 146) | def mock_agent():
class _MsgHelper (line 154) | class _MsgHelper:
method __init__ (line 157) | def __init__(self, classes):
method human (line 160) | def human(self, content):
method ai (line 163) | def ai(self, content, msg_id=None):
function msg (line 171) | def msg(classes):
class TestAsyncExecutionPath (line 181) | class TestAsyncExecutionPath:
method test_aexecute_success (line 185) | async def test_aexecute_success(self, classes, base_config, mock_agent...
method test_aexecute_collects_ai_messages (line 216) | async def test_aexecute_collects_ai_messages(self, classes, base_confi...
method test_aexecute_handles_duplicate_messages (line 244) | async def test_aexecute_handles_duplicate_messages(self, classes, base...
method test_aexecute_handles_list_content (line 268) | async def test_aexecute_handles_list_content(self, classes, base_confi...
method test_aexecute_handles_agent_exception (line 296) | async def test_aexecute_handles_agent_exception(self, classes, base_co...
method test_aexecute_no_final_state (line 317) | async def test_aexecute_no_final_state(self, classes, base_config, moc...
method test_aexecute_no_ai_message_in_state (line 337) | async def test_aexecute_no_ai_message_in_state(self, classes, base_con...
class TestSyncExecutionPath (line 364) | class TestSyncExecutionPath:
method test_execute_runs_async_in_event_loop (line 367) | def test_execute_runs_async_in_event_loop(self, classes, base_config, ...
method test_execute_in_thread_pool_context (line 393) | def test_execute_in_thread_pool_context(self, classes, base_config, msg):
method test_execute_handles_asyncio_run_failure (line 433) | def test_execute_handles_asyncio_run_failure(self, classes, base_config):
method test_execute_with_result_holder (line 453) | def test_execute_with_result_holder(self, classes, base_config, mock_a...
class TestAsyncToolSupport (line 492) | class TestAsyncToolSupport:
method test_async_tool_called_in_astream (line 496) | async def test_async_tool_called_in_astream(self, classes, base_config...
method test_sync_execute_with_async_tools (line 538) | def test_sync_execute_with_async_tools(self, classes, base_config, msg):
class TestThreadSafety (line 581) | class TestThreadSafety:
method test_multiple_executors_in_parallel (line 584) | def test_multiple_executors_in_parallel(self, classes, base_config, msg):
class TestCleanupBackgroundTask (line 635) | class TestCleanupBackgroundTask:
method executor_module (line 639) | def executor_module(self, _setup_executor_classes):
method test_cleanup_removes_terminal_completed_task (line 648) | def test_cleanup_removes_terminal_completed_task(self, executor_module...
method test_cleanup_removes_terminal_failed_task (line 669) | def test_cleanup_removes_terminal_failed_task(self, executor_module, c...
method test_cleanup_removes_terminal_timed_out_task (line 688) | def test_cleanup_removes_terminal_timed_out_task(self, executor_module...
method test_cleanup_skips_running_task (line 707) | def test_cleanup_skips_running_task(self, executor_module, classes):
method test_cleanup_skips_pending_task (line 730) | def test_cleanup_skips_pending_task(self, executor_module, classes):
method test_cleanup_handles_unknown_task_gracefully (line 747) | def test_cleanup_handles_unknown_task_gracefully(self, executor_module):
method test_cleanup_removes_task_with_completed_at_even_if_running (line 752) | def test_cleanup_removes_task_with_completed_at_even_if_running(self, ...
FILE: backend/tests/test_subagent_timeout_config.py
function _reset_subagents_config (line 27) | def _reset_subagents_config(timeout_seconds: int = 900, agents: dict | N...
class TestSubagentOverrideConfig (line 37) | class TestSubagentOverrideConfig:
method test_default_is_none (line 38) | def test_default_is_none(self):
method test_explicit_value (line 42) | def test_explicit_value(self):
method test_rejects_zero (line 46) | def test_rejects_zero(self):
method test_rejects_negative (line 50) | def test_rejects_negative(self):
method test_minimum_valid_value (line 54) | def test_minimum_valid_value(self):
class TestSubagentsAppConfigDefaults (line 64) | class TestSubagentsAppConfigDefaults:
method test_default_timeout (line 65) | def test_default_timeout(self):
method test_default_agents_empty (line 69) | def test_default_agents_empty(self):
method test_custom_global_timeout (line 73) | def test_custom_global_timeout(self):
method test_rejects_zero_timeout (line 77) | def test_rejects_zero_timeout(self):
method test_rejects_negative_timeout (line 81) | def test_rejects_negative_timeout(self):
class TestGetTimeoutFor (line 91) | class TestGetTimeoutFor:
method test_returns_global_default_when_no_override (line 92) | def test_returns_global_default_when_no_override(self):
method test_returns_per_agent_override_when_set (line 98) | def test_returns_per_agent_override_when_set(self):
method test_other_agents_still_use_global_default (line 105) | def test_other_agents_still_use_global_default(self):
method test_agent_with_none_override_falls_back_to_global (line 112) | def test_agent_with_none_override_falls_back_to_global(self):
method test_multiple_per_agent_overrides (line 119) | def test_multiple_per_agent_overrides(self):
class TestLoadSubagentsConfig (line 136) | class TestLoadSubagentsConfig:
method teardown_method (line 137) | def teardown_method(self):
method test_load_global_timeout (line 141) | def test_load_global_timeout(self):
method test_load_with_per_agent_overrides (line 145) | def test_load_with_per_agent_overrides(self):
method test_load_partial_override (line 159) | def test_load_partial_override(self):
method test_load_empty_dict_uses_defaults (line 170) | def test_load_empty_dict_uses_defaults(self):
method test_load_replaces_previous_config (line 176) | def test_load_replaces_previous_config(self):
method test_singleton_returns_same_instance_between_calls (line 183) | def test_singleton_returns_same_instance_between_calls(self):
class TestRegistryGetSubagentConfig (line 193) | class TestRegistryGetSubagentConfig:
method teardown_method (line 194) | def teardown_method(self):
method test_returns_none_for_unknown_agent (line 197) | def test_returns_none_for_unknown_agent(self):
method test_returns_config_for_builtin_agents (line 202) | def test_returns_config_for_builtin_agents(self):
method test_default_timeout_preserved_when_no_config (line 208) | def test_default_timeout_preserved_when_no_config(self):
method test_global_timeout_override_applied (line 215) | def test_global_timeout_override_applied(self):
method test_per_agent_timeout_override_applied (line 222) | def test_per_agent_timeout_override_applied(self):
method test_per_agent_override_does_not_affect_other_agents (line 234) | def test_per_agent_override_does_not_affect_other_agents(self):
method test_builtin_config_object_is_not_mutated (line 246) | def test_builtin_config_object_is_not_mutated(self):
method test_config_preserves_other_fields (line 258) | def test_config_preserves_other_fields(self):
class TestRegistryListSubagents (line 280) | class TestRegistryListSubagents:
method teardown_method (line 281) | def teardown_method(self):
method test_lists_both_builtin_agents (line 284) | def test_lists_both_builtin_agents(self):
method test_all_returned_configs_get_global_override (line 291) | def test_all_returned_configs_get_global_override(self):
method test_per_agent_overrides_reflected_in_list (line 298) | def test_per_agent_overrides_reflected_in_list(self):
class TestPollingTimeoutCalculation (line 320) | class TestPollingTimeoutCalculation:
method test_polling_timeout_formula (line 333) | def test_polling_timeout_formula(self, timeout_seconds: int, expected_...
method test_polling_timeout_exceeds_execution_timeout (line 343) | def test_polling_timeout_exceeds_execution_timeout(self):
FILE: backend/tests/test_suggestions_router.py
function test_strip_markdown_code_fence_removes_wrapping (line 7) | def test_strip_markdown_code_fence_removes_wrapping():
function test_strip_markdown_code_fence_no_fence_keeps_content (line 12) | def test_strip_markdown_code_fence_no_fence_keeps_content():
function test_parse_json_string_list_filters_invalid_items (line 17) | def test_parse_json_string_list_filters_invalid_items():
function test_parse_json_string_list_rejects_non_list (line 22) | def test_parse_json_string_list_rejects_non_list():
function test_format_conversation_formats_roles (line 27) | def test_format_conversation_formats_roles():
function test_generate_suggestions_parses_and_limits (line 36) | def test_generate_suggestions_parses_and_limits(monkeypatch):
function test_generate_suggestions_parses_list_block_content (line 54) | def test_generate_suggestions_parses_list_block_content(monkeypatch):
function test_generate_suggestions_parses_output_text_block_content (line 72) | def test_generate_suggestions_parses_output_text_block_content(monkeypat...
function test_generate_suggestions_returns_empty_on_model_error (line 90) | def test_generate_suggestions_returns_empty_on_model_error(monkeypatch):
FILE: backend/tests/test_task_tool_core_logic.py
class FakeSubagentStatus (line 14) | class FakeSubagentStatus(Enum):
function _make_runtime (line 23) | def _make_runtime() -> SimpleNamespace:
function _make_subagent_config (line 39) | def _make_subagent_config() -> SubagentConfig:
function _make_result (line 49) | def _make_result(
function test_task_tool_returns_error_for_unknown_subagent (line 64) | def test_task_tool_returns_error_for_unknown_subagent(monkeypatch):
function test_task_tool_emits_running_and_completed_events (line 78) | def test_task_tool_emits_running_and_completed_events(monkeypatch):
function test_task_tool_returns_failed_message (line 140) | def test_task_tool_returns_failed_message(monkeypatch):
function test_task_tool_returns_timed_out_message (line 174) | def test_task_tool_returns_timed_out_message(monkeypatch):
function test_task_tool_polling_safety_timeout (line 208) | def test_task_tool_polling_safety_timeout(monkeypatch):
function test_cleanup_called_on_completed (line 244) | def test_cleanup_called_on_completed(monkeypatch):
function test_cleanup_called_on_failed (line 284) | def test_cleanup_called_on_failed(monkeypatch):
function test_cleanup_called_on_timed_out (line 324) | def test_cleanup_called_on_timed_out(monkeypatch):
function test_cleanup_not_called_on_polling_safety_timeout (line 364) | def test_cleanup_not_called_on_polling_safety_timeout(monkeypatch):
FILE: backend/tests/test_thread_data_middleware.py
class TestThreadDataMiddleware (line 7) | class TestThreadDataMiddleware:
method test_before_agent_returns_paths_when_thread_id_present_in_context (line 8) | def test_before_agent_returns_paths_when_thread_id_present_in_context(...
method test_before_agent_uses_thread_id_from_configurable_when_context_is_none (line 18) | def test_before_agent_uses_thread_id_from_configurable_when_context_is...
method test_before_agent_uses_thread_id_from_configurable_when_context_missing_thread_id (line 32) | def test_before_agent_uses_thread_id_from_configurable_when_context_mi...
method test_before_agent_raises_clear_error_when_thread_id_missing_everywhere (line 46) | def test_before_agent_raises_clear_error_when_thread_id_missing_everyw...
FILE: backend/tests/test_title_generation.py
class TestTitleConfig (line 9) | class TestTitleConfig:
method test_default_config (line 12) | def test_default_config(self):
method test_custom_config (line 20) | def test_custom_config(self):
method test_config_validation (line 33) | def test_config_validation(self):
method test_get_set_config (line 47) | def test_get_set_config(self):
class TestTitleMiddleware (line 63) | class TestTitleMiddleware:
method test_middleware_initialization (line 66) | def test_middleware_initialization(self):
FILE: backend/tests/test_title_middleware_core_logic.py
function _clone_title_config (line 12) | def _clone_title_config(config: TitleConfig) -> TitleConfig:
function _set_test_title_config (line 17) | def _set_test_title_config(**overrides) -> TitleConfig:
class TestTitleMiddlewareCoreLogic (line 25) | class TestTitleMiddlewareCoreLogic:
method setup_method (line 26) | def setup_method(self):
method teardown_method (line 30) | def teardown_method(self):
method test_should_generate_title_for_first_complete_exchange (line 33) | def test_should_generate_title_for_first_complete_exchange(self):
method test_should_not_generate_title_when_disabled_or_already_set (line 45) | def test_should_not_generate_title_when_disabled_or_already_set(self):
method test_should_not_generate_title_after_second_user_turn (line 62) | def test_should_not_generate_title_after_second_user_turn(self):
method test_generate_title_trims_quotes_and_respects_max_chars (line 76) | def test_generate_title_trims_quotes_and_respects_max_chars(self, monk...
method test_generate_title_normalizes_structured_message_and_response_content (line 96) | def test_generate_title_normalizes_structured_message_and_response_con...
method test_generate_title_fallback_when_model_fails (line 127) | def test_generate_title_fallback_when_model_fails(self, monkeypatch):
method test_aafter_model_delegates_to_async_helper (line 147) | def test_aafter_model_delegates_to_async_helper(self, monkeypatch):
method test_after_model_sync_delegates_to_sync_helper (line 157) | def test_after_model_sync_delegates_to_sync_helper(self, monkeypatch):
method test_sync_generate_title_with_model (line 167) | def test_sync_generate_title_with_model(self, monkeypatch):
method test_empty_title_falls_back (line 185) | def test_empty_title_falls_back(self, monkeypatch):
FILE: backend/tests/test_token_usage.py
class TestSerializeMessageUsageMetadata (line 16) | class TestSerializeMessageUsageMetadata:
method test_ai_message_with_usage_metadata (line 19) | def test_ai_message_with_usage_metadata(self):
method test_ai_message_without_usage_metadata (line 33) | def test_ai_message_without_usage_metadata(self):
method test_tool_message_never_has_usage_metadata (line 39) | def test_tool_message_never_has_usage_metadata(self):
method test_human_message_never_has_usage_metadata (line 45) | def test_human_message_never_has_usage_metadata(self):
method test_ai_message_with_tool_calls_and_usage (line 51) | def test_ai_message_with_tool_calls_and_usage(self):
method test_ai_message_with_zero_usage (line 63) | def test_ai_message_with_zero_usage(self):
class TestCumulativeUsageTracking (line 83) | class TestCumulativeUsageTracking:
method test_single_message_usage (line 86) | def test_single_message_usage(self):
method test_multiple_messages_usage (line 95) | def test_multiple_messages_usage(self):
method test_missing_usage_keys_treated_as_zero (line 109) | def test_missing_usage_keys_treated_as_zero(self):
method test_empty_usage_metadata_stays_zero (line 118) | def test_empty_usage_metadata_stays_zero(self):
function _make_agent_mock (line 133) | def _make_agent_mock(chunks):
function _mock_app_config (line 140) | def _mock_app_config():
class TestStreamUsageIntegration (line 153) | class TestStreamUsageIntegration:
method _make_client (line 156) | def _make_client(self):
method test_stream_emits_usage_in_messages_tuple (line 160) | def test_stream_emits_usage_in_messages_tuple(self):
method test_stream_cumulative_usage_in_end_event (line 195) | def test_stream_cumulative_usage_in_end_event(self):
method test_stream_no_usage_metadata_no_usage_in_events (line 231) | def test_stream_no_usage_metadata_no_usage_in_events(self):
method test_stream_usage_with_tool_calls (line 264) | def test_stream_usage_with_tool_calls(self):
FILE: backend/tests/test_tool_error_handling_middleware.py
function _request (line 10) | def _request(name: str = "web_search", tool_call_id: str | None = "tc-1"):
function test_wrap_tool_call_passthrough_on_success (line 17) | def test_wrap_tool_call_passthrough_on_success():
function test_wrap_tool_call_returns_error_tool_message_on_exception (line 27) | def test_wrap_tool_call_returns_error_tool_message_on_exception():
function test_wrap_tool_call_uses_fallback_tool_call_id_when_missing (line 44) | def test_wrap_tool_call_uses_fallback_tool_call_id_when_missing():
function test_wrap_tool_call_reraises_graph_interrupt (line 59) | def test_wrap_tool_call_reraises_graph_interrupt():
function test_awrap_tool_call_returns_error_tool_message_on_exception (line 71) | async def test_awrap_tool_call_returns_error_tool_message_on_exception():
function test_awrap_tool_call_reraises_graph_interrupt (line 88) | async def test_awrap_tool_call_reraises_graph_interrupt():
FILE: backend/tests/test_tool_search.py
function _make_mock_tool (line 20) | def _make_mock_tool(name: str, description: str):
function registry (line 33) | def registry():
function _reset_singleton (line 46) | def _reset_singleton():
class TestToolSearchConfig (line 56) | class TestToolSearchConfig:
method test_default_disabled (line 57) | def test_default_disabled(self):
method test_enabled (line 61) | def test_enabled(self):
method test_load_from_dict (line 65) | def test_load_from_dict(self):
method test_load_from_empty_dict (line 69) | def test_load_from_empty_dict(self):
class TestDeferredToolRegistry (line 77) | class TestDeferredToolRegistry:
method test_register_and_len (line 78) | def test_register_and_len(self, registry):
method test_entries (line 81) | def test_entries(self, registry):
method test_search_select_single (line 86) | def test_search_select_single(self, registry):
method test_search_select_multiple (line 91) | def test_search_select_multiple(self, registry):
method test_search_select_nonexistent (line 96) | def test_search_select_nonexistent(self, registry):
method test_search_plus_keyword (line 100) | def test_search_plus_keyword(self, registry):
method test_search_plus_keyword_with_ranking (line 105) | def test_search_plus_keyword_with_ranking(self, registry):
method test_search_regex_keyword (line 111) | def test_search_regex_keyword(self, registry):
method test_search_regex_description (line 117) | def test_search_regex_description(self, registry):
method test_search_regex_case_insensitive (line 122) | def test_search_regex_case_insensitive(self, registry):
method test_search_invalid_regex_falls_back_to_literal (line 126) | def test_search_invalid_regex_falls_back_to_literal(self, registry):
method test_search_name_match_ranks_higher (line 131) | def test_search_name_match_ranks_higher(self, registry):
method test_search_max_results (line 139) | def test_search_max_results(self):
method test_search_empty_registry (line 146) | def test_search_empty_registry(self):
method test_empty_registry_len (line 150) | def test_empty_registry_len(self):
class TestSingleton (line 158) | class TestSingleton:
method test_default_none (line 159) | def test_default_none(self):
method test_set_and_get (line 162) | def test_set_and_get(self, registry):
method test_reset (line 166) | def test_reset(self, registry):
class TestToolSearchTool (line 175) | class TestToolSearchTool:
method test_no_registry (line 176) | def test_no_registry(self):
method test_no_match (line 182) | def test_no_match(self, registry):
method test_returns_valid_json (line 189) | def test_returns_valid_json(self, registry):
method test_returns_openai_function_format (line 199) | def test_returns_openai_function_format(self, registry):
method test_keyword_search_returns_json (line 211) | def test_keyword_search_returns_json(self, registry):
class TestDeferredToolsPromptSection (line 225) | class TestDeferredToolsPromptSection:
method _mock_app_config (line 227) | def _mock_app_config(self, monkeypatch):
method test_empty_when_disabled (line 237) | def test_empty_when_disabled(self):
method test_empty_when_enabled_but_no_registry (line 244) | def test_empty_when_enabled_but_no_registry(self, monkeypatch):
method test_empty_when_enabled_but_empty_registry (line 252) | def test_empty_when_enabled_but_empty_registry(self, monkeypatch):
method test_lists_tool_names (line 261) | def test_lists_tool_names(self, registry, monkeypatch):
class TestDeferredToolFilterMiddleware (line 280) | class TestDeferredToolFilterMiddleware:
method _ensure_middlewares_package (line 282) | def _ensure_middlewares_package(self):
method test_filters_deferred_tools (line 300) | def test_filters_deferred_tools(self, registry):
method test_no_op_when_no_registry (line 323) | def test_no_op_when_no_registry(self):
method test_preserves_dict_tools (line 342) | def test_preserves_dict_tools(self, registry):
FILE: backend/tests/test_tracing_config.py
function _reset_tracing_cache (line 8) | def _reset_tracing_cache() -> None:
function test_prefers_langsmith_env_names (line 12) | def test_prefers_langsmith_env_names(monkeypatch):
function test_falls_back_to_langchain_env_names (line 28) | def test_falls_back_to_langchain_env_names(monkeypatch):
function test_langsmith_tracing_false_overrides_langchain_tracing_v2_true (line 49) | def test_langsmith_tracing_false_overrides_langchain_tracing_v2_true(mon...
function test_defaults_when_project_not_set (line 62) | def test_defaults_when_project_not_set(monkeypatch):
FILE: backend/tests/test_uploads_middleware_core_logic.py
function _middleware (line 26) | def _middleware(tmp_path: Path) -> UploadsMiddleware:
function _runtime (line 30) | def _runtime(thread_id: str | None = THREAD_ID) -> MagicMock:
function _uploads_dir (line 36) | def _uploads_dir(tmp_path: Path, thread_id: str = THREAD_ID) -> Path:
function _human (line 42) | def _human(content, files=None, **extra_kwargs):
class TestFilesFromKwargs (line 54) | class TestFilesFromKwargs:
method test_returns_none_when_files_field_absent (line 55) | def test_returns_none_when_files_field_absent(self, tmp_path):
method test_returns_none_for_empty_files_list (line 60) | def test_returns_none_for_empty_files_list(self, tmp_path):
method test_returns_none_for_non_list_files (line 65) | def test_returns_none_for_non_list_files(self, tmp_path):
method test_skips_non_dict_entries (line 70) | def test_skips_non_dict_entries(self, tmp_path):
method test_skips_entries_with_empty_filename (line 75) | def test_skips_entries_with_empty_filename(self, tmp_path):
method test_always_uses_virtual_path (line 80) | def test_always_uses_virtual_path(self, tmp_path):
method test_skips_file_that_does_not_exist_on_disk (line 91) | def test_skips_file_that_does_not_exist_on_disk(self, tmp_path):
method test_accepts_file_that_exists_on_disk (line 98) | def test_accepts_file_that_exists_on_disk(self, tmp_path):
method test_skips_nonexistent_but_accepts_existing_in_mixed_list (line 109) | def test_skips_nonexistent_but_accepts_existing_in_mixed_list(self, tm...
method test_no_existence_check_when_uploads_dir_is_none (line 124) | def test_no_existence_check_when_uploads_dir_is_none(self, tmp_path):
method test_size_is_coerced_to_int (line 132) | def test_size_is_coerced_to_int(self, tmp_path):
method test_missing_size_defaults_to_zero (line 139) | def test_missing_size_defaults_to_zero(self, tmp_path):
class TestCreateFilesMessage (line 152) | class TestCreateFilesMessage:
method _new_file (line 153) | def _new_file(self, filename="notes.txt", size=1024):
method test_new_files_section_always_present (line 156) | def test_new_files_section_always_present(self, tmp_path):
method test_historical_section_present_only_when_non_empty (line 165) | def test_historical_section_present_only_when_non_empty(self, tmp_path):
method test_size_formatting_kb (line 176) | def test_size_formatting_kb(self, tmp_path):
method test_size_formatting_mb (line 181) | def test_size_formatting_mb(self, tmp_path):
method test_read_file_instruction_included (line 186) | def test_read_file_instruction_included(self, tmp_path):
method test_empty_new_files_produces_empty_marker (line 191) | def test_empty_new_files_produces_empty_marker(self, tmp_path):
class TestBeforeAgent (line 204) | class TestBeforeAgent:
method _state (line 205) | def _state(self, *messages):
method test_returns_none_when_messages_empty (line 208) | def test_returns_none_when_messages_empty(self, tmp_path):
method test_returns_none_when_last_message_is_not_human (line 212) | def test_returns_none_when_last_message_is_not_human(self, tmp_path):
method test_returns_none_when_no_files_in_kwargs (line 217) | def test_returns_none_when_no_files_in_kwargs(self, tmp_path):
method test_returns_none_when_all_files_missing_from_disk (line 222) | def test_returns_none_when_all_files_missing_from_disk(self, tmp_path):
method test_injects_uploaded_files_tag_into_string_content (line 229) | def test_injects_uploaded_files_tag_into_string_content(self, tmp_path):
method test_injects_uploaded_files_tag_into_list_content (line 245) | def test_injects_uploaded_files_tag_into_list_content(self, tmp_path):
method test_preserves_additional_kwargs_on_updated_message (line 262) | def test_preserves_additional_kwargs_on_updated_message(self, tmp_path):
method test_uploaded_files_returned_in_state_update (line 277) | def test_uploaded_files_returned_in_state_update(self, tmp_path):
method test_historical_files_from_uploads_dir_excluding_new (line 295) | def test_historical_files_from_uploads_dir_excluding_new(self, tmp_path):
method test_no_historical_section_when_upload_dir_is_empty (line 311) | def test_no_historical_section_when_upload_dir_is_empty(self, tmp_path):
method test_no_historical_scan_when_thread_id_is_none (line 322) | def test_no_historical_scan_when_thread_id_is_none(self, tmp_path):
method test_message_id_preserved_on_updated_message (line 332) | def test_message_id_preserved_on_updated_message(self, tmp_path):
FILE: backend/tests/test_uploads_router.py
function test_upload_files_writes_thread_storage_and_skips_local_sandbox_sync (line 11) | def test_upload_files_writes_thread_storage_and_skips_local_sandbox_sync...
function test_upload_files_syncs_non_local_sandbox_and_marks_markdown_file (line 35) | def test_upload_files_syncs_non_local_sandbox_and_marks_markdown_file(tm...
function test_upload_files_rejects_dotdot_and_dot_filenames (line 70) | def test_upload_files_rejects_dotdot_and_dot_filenames(tmp_path):
function test_delete_uploaded_file_removes_generated_markdown_companion (line 101) | def test_delete_uploaded_file_removes_generated_markdown_companion(tmp_p...
FILE: docker/provisioner/app.py
function _init_k8s_client (line 77) | def _init_k8s_client() -> k8s_client.CoreV1Api:
function _wait_for_kubeconfig (line 122) | def _wait_for_kubeconfig(timeout: int = 30) -> None:
function _ensure_namespace (line 146) | def _ensure_namespace() -> None:
function lifespan (line 172) | async def lifespan(_app: FastAPI):
class CreateSandboxRequest (line 187) | class CreateSandboxRequest(BaseModel):
class SandboxResponse (line 192) | class SandboxResponse(BaseModel):
function _pod_name (line 201) | def _pod_name(sandbox_id: str) -> str:
function _svc_name (line 205) | def _svc_name(sandbox_id: str) -> str:
function _sandbox_url (line 209) | def _sandbox_url(node_port: int) -> str:
function _build_pod (line 214) | def _build_pod(sandbox_id: str, thread_id: str) -> k8s_client.V1Pod:
function _build_service (line 311) | def _build_service(sandbox_id: str) -> k8s_client.V1Service:
function _get_node_port (line 342) | def _get_node_port(sandbox_id: str) -> int | None:
function _get_pod_phase (line 354) | def _get_pod_phase(sandbox_id: str) -> str:
function health (line 367) | async def health():
function create_sandbox (line 373) | async def create_sandbox(req: CreateSandboxRequest):
function destroy_sandbox (line 441) | async def destroy_sandbox(sandbox_id: str):
function get_sandbox (line 470) | async def get_sandbox(sandbox_id: str):
function list_sandboxes (line 484) | async def list_sandboxes():
FILE: frontend/public/demo/threads/5aa47db1-d0cb-4eb9-aea5-3dac1b371c5a/user-data/outputs/jiangsu-football/js/data.js
function getTeamById (line 783) | function getTeamById(teamId) {
function formatDate (line 788) | function formatDate(dateString) {
function formatTime (line 795) | function formatTime(timeString) {
FILE: frontend/public/demo/threads/5aa47db1-d0cb-4eb9-aea5-3dac1b371c5a/user-data/outputs/jiangsu-football/js/main.js
function initLoader (line 39) | function initLoader() {
function initThemeToggle (line 54) | function initThemeToggle() {
function initNavigation (line 88) | function initNavigation() {
function initScrollSpy (line 120) | function initScrollSpy() {
function renderTeams (line 146) | function renderTeams() {
function renderStandings (line 192) | function renderStandings() {
function renderFixtures (line 236) | function renderFixtures() {
function renderStats (line 317) | function renderStats() {
function renderScorers (line 323) | function renderScorers() {
function renderAssists (line 359) | function renderAssists() {
function renderTeamStats (line 395) | function renderTeamStats() {
function renderNews (line 451) | function renderNews() {
function initTabs (line 494) | function initTabs() {
function initMobileMenu (line 537) | function initMobileMenu() {
function darkenColor (line 565) | function darkenColor(color, percent) {
function formatDate (line 581) | function formatDate(dateString) {
function getTeamById (line 589) | function getTeamById(teamId) {
FILE: frontend/public/demo/threads/c02bb4d5-4202-490e-ae8f-ff4864fc0d2e/user-data/outputs/script.js
function initNavigation (line 20) | function initNavigation() {
function initQuotesSlider (line 41) | function initQuotesSlider() {
function initScrollReveal (line 94) | function initScrollReveal() {
function initSmoothScroll (line 135) | function initSmoothScroll() {
FILE: frontend/scripts/save-demo.js
function main (line 6) | async function main() {
function copyFolder (line 51) | function copyFolder(relPath, rootPath, backendRootPath) {
FILE: frontend/src/app/layout.tsx
function RootLayout (line 15) | async function RootLayout({
FILE: frontend/src/app/mock/api/mcp/config/route.ts
function GET (line 1) | function GET() {
FILE: frontend/src/app/mock/api/models/route.ts
function GET (line 1) | function GET() {
FILE: frontend/src/app/mock/api/skills/route.ts
function GET (line 1) | function GET() {
FILE: frontend/src/app/mock/api/threads/[thread_id]/artifacts/[[...artifact_path]]/route.ts
function GET (line 6) | async function GET(
FILE: frontend/src/app/mock/api/threads/[thread_id]/history/route.ts
function POST (line 6) | async function POST(
FILE: frontend/src/app/mock/api/threads/search/route.ts
type ThreadSearchRequest (line 4) | type ThreadSearchRequest = {
type MockThreadSearchResult (line 11) | type MockThreadSearchResult = Record<string, unknown> & {
function POST (line 16) | async function POST(request: Request) {
FILE: frontend/src/app/page.tsx
function LandingPage (line 10) | function LandingPage() {
FILE: frontend/src/app/workspace/agents/[agent_name]/chats/[thread_id]/layout.tsx
function AgentChatLayout (line 7) | function AgentChatLayout({
FILE: frontend/src/app/workspace/agents/[agent_name]/chats/[thread_id]/page.tsx
function AgentChatPage (line 28) | function AgentChatPage() {
FILE: frontend/src/app/workspace/agents/new/page.tsx
type Step (line 25) | type Step = "name" | "chat";
constant NAME_RE (line 27) | const NAME_RE = /^[A-Za-z0-9-]+$/;
function NewAgentPage (line 29) | function NewAgentPage() {
FILE: frontend/src/app/workspace/agents/page.tsx
function AgentsPage (line 3) | function AgentsPage() {
FILE: frontend/src/app/workspace/chats/[thread_id]/layout.tsx
function ChatLayout (line 7) | function ChatLayout({
FILE: frontend/src/app/workspace/chats/[thread_id]/page.tsx
function ChatPage (line 27) | function ChatPage() {
FILE: frontend/src/app/workspace/chats/page.tsx
function ChatsPage (line 18) | function ChatsPage() {
FILE: frontend/src/app/workspace/layout.tsx
function WorkspaceLayout (line 13) | function WorkspaceLayout({
FILE: frontend/src/app/workspace/page.tsx
function WorkspacePage (line 8) | function WorkspacePage() {
FILE: frontend/src/components/ai-elements/artifact.tsx
type ArtifactProps (line 14) | type ArtifactProps = HTMLAttributes<HTMLDivElement>;
type ArtifactHeaderProps (line 26) | type ArtifactHeaderProps = HTMLAttributes<HTMLDivElement>;
type ArtifactCloseProps (line 41) | type ArtifactCloseProps = ComponentProps<typeof Button>;
type ArtifactTitleProps (line 65) | type ArtifactTitleProps = HTMLAttributes<HTMLParagraphElement>;
type ArtifactDescriptionProps (line 74) | type ArtifactDescriptionProps = HTMLAttributes<HTMLParagraphElement>;
type ArtifactActionsProps (line 83) | type ArtifactActionsProps = HTMLAttributes<HTMLDivElement>;
type ArtifactActionProps (line 92) | type ArtifactActionProps = ComponentProps<typeof Button> & {
type ArtifactContentProps (line 140) | type ArtifactContentProps = HTMLAttributes<HTMLDivElement>;
FILE: frontend/src/components/ai-elements/canvas.tsx
type CanvasProps (line 5) | type CanvasProps = ReactFlowProps & {
FILE: frontend/src/components/ai-elements/chain-of-thought.tsx
type ChainOfThoughtContextValue (line 26) | type ChainOfThoughtContextValue = {
type ChainOfThoughtProps (line 45) | type ChainOfThoughtProps = ComponentProps<"div"> & {
type ChainOfThoughtHeaderProps (line 81) | type ChainOfThoughtHeaderProps = ComponentProps<
type ChainOfThoughtStepProps (line 116) | type ChainOfThoughtStepProps = ComponentProps<"div"> & {
type ChainOfThoughtSearchResultsProps (line 165) | type ChainOfThoughtSearchResultsProps = ComponentProps<"div">;
type ChainOfThoughtSearchResultProps (line 179) | type ChainOfThoughtSearchResultProps = ComponentProps<typeof Badge>;
type ChainOfThoughtContentProps (line 193) | type ChainOfThoughtContentProps = ComponentProps<
type ChainOfThoughtImageProps (line 218) | type ChainOfThoughtImageProps = ComponentProps<"div"> & {
FILE: frontend/src/components/ai-elements/checkpoint.tsx
type CheckpointProps (line 14) | type CheckpointProps = HTMLAttributes<HTMLDivElement>;
type CheckpointIconProps (line 30) | type CheckpointIconProps = LucideProps;
type CheckpointTriggerProps (line 41) | type CheckpointTriggerProps = ComponentProps<typeof Button> & {
FILE: frontend/src/components/ai-elements/code-block.tsx
type CodeBlockProps (line 17) | type CodeBlockProps = HTMLAttributes<HTMLDivElement> & {
type CodeBlockContextType (line 23) | type CodeBlockContextType = {
method line (line 33) | line(node, line) {
function highlightCode (line 52) | async function highlightCode(
type CodeBlockCopyButtonProps (line 132) | type CodeBlockCopyButtonProps = ComponentProps<typeof Button> & {
FILE: frontend/src/components/ai-elements/connection.tsx
constant HALF (line 3) | const HALF = 0.5;
FILE: frontend/src/components/ai-elements/context.tsx
constant PERCENT_MAX (line 15) | const PERCENT_MAX = 100;
constant ICON_RADIUS (line 16) | const ICON_RADIUS = 10;
constant ICON_VIEWBOX (line 17) | const ICON_VIEWBOX = 24;
constant ICON_CENTER (line 18) | const ICON_CENTER = 12;
constant ICON_STROKE_WIDTH (line 19) | const ICON_STROKE_WIDTH = 2;
type ModelId (line 21) | type ModelId = string;
type ContextSchema (line 23) | type ContextSchema = {
type ContextProps (line 42) | type ContextProps = ComponentProps<typeof HoverCard> & ContextSchema;
type ContextTriggerProps (line 104) | type ContextTriggerProps = ComponentProps<typeof Button>;
type ContextContentProps (line 128) | type ContextContentProps = ComponentProps<typeof HoverCardContent>;
type ContextContentHeaderProps (line 140) | type ContextContentHeaderProps = ComponentProps<"div">;
type ContextContentBodyProps (line 179) | type ContextContentBodyProps = ComponentProps<"div">;
type ContextContentFooterProps (line 191) | type ContextContentFooterProps = ComponentProps<"div">;
type ContextInputUsageProps (line 231) | type ContextInputUsageProps = ComponentProps<"div">;
type ContextOutputUsageProps (line 271) | type ContextOutputUsageProps = ComponentProps<"div">;
type ContextReasoningUsageProps (line 311) | type ContextReasoningUsageProps = ComponentProps<"div">;
type ContextCacheUsageProps (line 351) | type ContextCacheUsageProps = ComponentProps<"div">;
FILE: frontend/src/components/ai-elements/controls.tsx
type ControlsProps (line 7) | type ControlsProps = ComponentProps<typeof ControlsPrimitive>;
FILE: frontend/src/components/ai-elements/conversation.tsx
type ConversationProps (line 10) | type ConversationProps = ComponentProps<typeof StickToBottom>;
type ConversationContentProps (line 22) | type ConversationContentProps = ComponentProps<
type ConversationEmptyStateProps (line 36) | type ConversationEmptyStateProps = ComponentProps<"div"> & {
type ConversationScrollButtonProps (line 71) | type ConversationScrollButtonProps = ComponentProps<typeof Button>;
FILE: frontend/src/components/ai-elements/image.tsx
type ImageProps (line 4) | type ImageProps = Experimental_GeneratedImage & {
FILE: frontend/src/components/ai-elements/loader.tsx
type LoaderIconProps (line 4) | type LoaderIconProps = {
type LoaderProps (line 82) | type LoaderProps = HTMLAttributes<HTMLDivElement> & {
FILE: frontend/src/components/ai-elements/message.tsx
type MessageProps (line 23) | type MessageProps = HTMLAttributes<HTMLDivElement> & {
type MessageContentProps (line 38) | type MessageContentProps = HTMLAttributes<HTMLDivElement>;
type MessageActionsProps (line 59) | type MessageActionsProps = ComponentProps<"div">;
type MessageActionProps (line 71) | type MessageActionProps = ComponentProps<typeof Button> & {
type MessageBranchContextType (line 107) | type MessageBranchContextType = {
type MessageBranchProps (line 132) | type MessageBranchProps = HTMLAttributes<HTMLDivElement> & {
type MessageBranchContentProps (line 182) | type MessageBranchContentProps = HTMLAttributes<HTMLDivElement>;
type MessageBranchSelectorProps (line 212) | type MessageBranchSelectorProps = HTMLAttributes<HTMLDivElement> & {
type MessageBranchPreviousProps (line 237) | type MessageBranchPreviousProps = ComponentProps<typeof Button>;
type MessageBranchNextProps (line 260) | type MessageBranchNextProps = ComponentProps<typeof Button>;
type MessageBranchPageProps (line 284) | type MessageBranchPageProps = HTMLAttributes<HTMLSpanElement>;
type MessageResponseProps (line 305) | type MessageResponseProps = ComponentProps<typeof Streamdown>;
type MessageAttachmentProps (line 322) | type MessageAttachmentProps = HTMLAttributes<HTMLDivElement> & {
function MessageAttachment (line 328) | function MessageAttachment({
type MessageAttachmentsProps (line 406) | type MessageAttachmentsProps = ComponentProps<"div">;
function MessageAttachments (line 408) | function MessageAttachments({
type MessageToolbarProps (line 430) | type MessageToolbarProps = ComponentProps<"div">;
FILE: frontend/src/components/ai-elements/model-selector.tsx
type ModelSelectorProps (line 21) | type ModelSelectorProps = ComponentProps<typeof Dialog>;
type ModelSelectorTriggerProps (line 27) | type ModelSelectorTriggerProps = ComponentProps<typeof DialogTrigger>;
type ModelSelectorContentProps (line 33) | type ModelSelectorContentProps = ComponentProps<typeof DialogContent> & {
type ModelSelectorDialogProps (line 51) | type ModelSelectorDialogProps = ComponentProps<typeof CommandDialog>;
type ModelSelectorInputProps (line 57) | type ModelSelectorInputProps = ComponentProps<typeof CommandInput>;
type ModelSelectorListProps (line 66) | type ModelSelectorListProps = ComponentProps<typeof CommandList>;
type ModelSelectorEmptyProps (line 72) | type ModelSelectorEmptyProps = ComponentProps<typeof CommandEmpty>;
type ModelSelectorGroupProps (line 78) | type ModelSelectorGroupProps = ComponentProps<typeof CommandGroup>;
type ModelSelectorItemProps (line 84) | type ModelSelectorItemProps = ComponentProps<typeof CommandItem>;
type ModelSelectorShortcutProps (line 90) | type ModelSelectorShortcutProps = ComponentProps<typeof CommandShortcut>;
type ModelSelectorSeparatorProps (line 96) | type ModelSelectorSeparatorProps = ComponentProps<
type ModelSelectorLogoProps (line 104) | type ModelSelectorLogoProps = Omit<
type ModelSelectorLogoGroupProps (line 183) | type ModelSelectorLogoGroupProps = ComponentProps<"div">;
type ModelSelectorNameProps (line 198) | type ModelSelectorNameProps = ComponentProps<"span">;
FILE: frontend/src/components/ai-elements/node.tsx
type NodeProps (line 14) | type NodeProps = ComponentProps<typeof Card> & {
type NodeHeaderProps (line 35) | type NodeHeaderProps = ComponentProps<typeof CardHeader>;
type NodeTitleProps (line 44) | type NodeTitleProps = ComponentProps<typeof CardTitle>;
type NodeDescriptionProps (line 48) | type NodeDescriptionProps = ComponentProps<typeof CardDescription>;
type NodeActionProps (line 54) | type NodeActionProps = ComponentProps<typeof CardAction>;
type NodeContentProps (line 58) | type NodeContentProps = ComponentProps<typeof CardContent>;
type NodeFooterProps (line 64) | type NodeFooterProps = ComponentProps<typeof CardFooter>;
FILE: frontend/src/components/ai-elements/open-in-chat.tsx
type OpenInProps (line 196) | type OpenInProps = ComponentProps<typeof DropdownMenu> & {
type OpenInContentProps (line 206) | type OpenInContentProps = ComponentProps<typeof DropdownMenuContent>;
type OpenInItemProps (line 216) | type OpenInItemProps = ComponentProps<typeof DropdownMenuItem>;
type OpenInLabelProps (line 222) | type OpenInLabelProps = ComponentProps<typeof DropdownMenuLabel>;
type OpenInSeparatorProps (line 228) | type OpenInSeparatorProps = ComponentProps<typeof DropdownMenuSeparator>;
type OpenInTriggerProps (line 234) | type OpenInTriggerProps = ComponentProps<typeof DropdownMenuTrigger>;
type OpenInChatGPTProps (line 247) | type OpenInChatGPTProps = ComponentProps<typeof DropdownMenuItem>;
type OpenInClaudeProps (line 267) | type OpenInClaudeProps = ComponentProps<typeof DropdownMenuItem>;
type OpenInT3Props (line 287) | type OpenInT3Props = ComponentProps<typeof DropdownMenuItem>;
type OpenInSciraProps (line 307) | type OpenInSciraProps = ComponentProps<typeof DropdownMenuItem>;
type OpenInv0Props (line 327) | type OpenInv0Props = ComponentProps<typeof DropdownMenuItem>;
type OpenInCursorProps (line 347) | type OpenInCursorProps = ComponentProps<typeof DropdownMenuItem>;
FILE: frontend/src/components/ai-elements/panel.tsx
type PanelProps (line 5) | type PanelProps = ComponentProps<typeof PanelPrimitive>;
FILE: frontend/src/components/ai-elements/plan.tsx
type PlanContextValue (line 24) | type PlanContextValue = {
type PlanProps (line 38) | type PlanProps = ComponentProps<typeof Collapsible> & {
type PlanHeaderProps (line 55) | type PlanHeaderProps = ComponentProps<typeof CardHeader>;
type PlanTitleProps (line 65) | type PlanTitleProps = Omit<
type PlanDescriptionProps (line 82) | type PlanDescriptionProps = Omit<
type PlanActionProps (line 107) | type PlanActionProps = ComponentProps<typeof CardAction>;
type PlanContentProps (line 113) | type PlanContentProps = ComponentProps<typeof CardContent>;
type PlanFooterProps (line 121) | type PlanFooterProps = ComponentProps<"div">;
type PlanTriggerProps (line 127) | type PlanTriggerProps = ComponentProps<typeof CollapsibleTrigger>;
FILE: frontend/src/components/ai-elements/prompt-input.tsx
type AttachmentsContext (line 78) | type AttachmentsContext = {
type TextInputContext (line 87) | type TextInputContext = {
type PromptInputControllerProps (line 93) | type PromptInputControllerProps = {
type PromptInputProviderProps (line 137) | type PromptInputProviderProps = PropsWithChildren<{
function PromptInputProvider (line 145) | function PromptInputProvider({
type PromptInputAttachmentProps (line 280) | type PromptInputAttachmentProps = HTMLAttributes<HTMLDivElement> & {
function PromptInputAttachment (line 285) | function PromptInputAttachment({
type PromptInputAttachmentsProps (line 376) | type PromptInputAttachmentsProps = Omit<
function PromptInputAttachments (line 383) | function PromptInputAttachments({
type PromptInputActionAddAttachmentsProps (line 408) | type PromptInputActionAddAttachmentsProps = ComponentProps<
type PromptInputMessage (line 433) | type PromptInputMessage = {
type PromptInputProps (line 438) | type PromptInputProps = Omit<
type PromptInputBodyProps (line 811) | type PromptInputBodyProps = HTMLAttributes<HTMLDivElement>;
type PromptInputTextareaProps (line 820) | type PromptInputTextareaProps = ComponentProps<
type PromptInputHeaderProps (line 921) | type PromptInputHeaderProps = Omit<
type PromptInputFooterProps (line 937) | type PromptInputFooterProps = Omit<
type PromptInputToolsProps (line 953) | type PromptInputToolsProps = HTMLAttributes<HTMLDivElement>;
type PromptInputButtonProps (line 962) | type PromptInputButtonProps = ComponentProps<typeof InputGroupButton>;
type PromptInputActionMenuProps (line 981) | type PromptInputActionMenuProps = ComponentProps<typeof DropdownMenu>;
type PromptInputActionMenuTriggerProps (line 986) | type PromptInputActionMenuTriggerProps = PromptInputButtonProps;
type PromptInputActionMenuContentProps (line 1000) | type PromptInputActionMenuContentProps = ComponentProps<
type PromptInputActionMenuItemProps (line 1010) | type PromptInputActionMenuItemProps = ComponentProps<
type PromptInputSubmitProps (line 1023) | type PromptInputSubmitProps = ComponentProps<typeof InputGroupButton> & {
type SpeechRecognition (line 1059) | interface SpeechRecognition extends EventTarget {
type SpeechRecognitionEvent (line 1075) | interface SpeechRecognitionEvent extends Event {
type SpeechRecognitionResultList (line 1080) | type SpeechRecognitionResultList = {
type SpeechRecognitionResult (line 1086) | type SpeechRecognitionResult = {
type SpeechRecognitionAlternative (line 1093) | type SpeechRecognitionAlternative = {
type SpeechRecognitionErrorEvent (line 1098) | interface SpeechRecognitionErrorEvent extends Event {
type Window (line 1103) | interface Window {
type PromptInputSpeechButtonProps (line 1113) | type PromptInputSpeechButtonProps = ComponentProps<
type PromptInputSelectProps (line 1219) | type PromptInputSelectProps = ComponentProps<typeof Select>;
type PromptInputSelectTriggerProps (line 1225) | type PromptInputSelectTriggerProps = ComponentProps<
type PromptInputSelectContentProps (line 1243) | type PromptInputSelectContentProps = ComponentProps<
type PromptInputSelectItemProps (line 1254) | type PromptInputSelectItemProps = ComponentProps<typeof SelectItem>;
type PromptInputSelectValueProps (line 1263) | type PromptInputSelectValueProps = ComponentProps<typeof SelectValue>;
type PromptInputHoverCardProps (line 1272) | type PromptInputHoverCardProps = ComponentProps<typeof HoverCard>;
type PromptInputHoverCardTriggerProps (line 1282) | type PromptInputHoverCardTriggerProps = ComponentProps<
type PromptInputHoverCardContentProps (line 1290) | type PromptInputHoverCardContentProps = ComponentProps<
type PromptInputTabsListProps (line 1301) | type PromptInputTabsListProps = HTMLAttributes<HTMLDivElement>;
type PromptInputTabProps (line 1308) | type PromptInputTabProps = HTMLAttributes<HTMLDivElement>;
type PromptInputTabLabelProps (line 1315) | type PromptInputTabLabelProps = HTMLAttributes<HTMLHeadingElement>;
type PromptInputTabBodyProps (line 1330) | type PromptInputTabBodyProps = HTMLAttributes<HTMLDivElement>;
type PromptInputTabItemProps (line 1339) | type PromptInputTabItemProps = HTMLAttributes<HTMLDivElement>;
type PromptInputCommandProps (line 1354) | type PromptInputCommandProps = ComponentProps<typeof Command>;
type PromptInputCommandInputProps (line 1361) | type PromptInputCommandInputProps = ComponentProps<typeof CommandInput>;
type PromptInputCommandListProps (line 1370) | type PromptInputCommandListProps = ComponentProps<typeof CommandList>;
type PromptInputCommandEmptyProps (line 1379) | type PromptInputCommandEmptyProps = ComponentProps<typeof CommandEmpty>;
type PromptInputCommandGroupProps (line 1388) | type PromptInputCommandGroupProps = ComponentProps<typeof CommandGroup>;
type PromptInputCommandItemProps (line 1397) | type PromptInputCommandItemProps = ComponentProps<typeof CommandItem>;
type PromptInputCommandSeparatorProps (line 1406) | type PromptInputCommandSeparatorProps = ComponentProps<
FILE: frontend/src/components/ai-elements/queue.tsx
type QueueMessagePart (line 14) | type QueueMessagePart = {
type QueueMessage (line 22) | type QueueMessage = {
type QueueTodo (line 27) | type QueueTodo = {
type QueueItemProps (line 34) | type QueueItemProps = ComponentProps<"li">;
type QueueItemIndicatorProps (line 46) | type QueueItemIndicatorProps = ComponentProps<"span"> & {
type QueueItemContentProps (line 67) | type QueueItemContentProps = ComponentProps<"span"> & {
type QueueItemDescriptionProps (line 88) | type QueueItemDescriptionProps = ComponentProps<"div"> & {
type QueueItemActionsProps (line 109) | type QueueItemActionsProps = ComponentProps<"div">;
type QueueItemActionProps (line 118) | type QueueItemActionProps = Omit<
type QueueItemAttachmentProps (line 139) | type QueueItemAttachmentProps = ComponentProps<"div">;
type QueueItemImageProps (line 148) | type QueueItemImageProps = ComponentProps<"img">;
type QueueItemFileProps (line 163) | type QueueItemFileProps = ComponentProps<"span">;
type QueueListProps (line 182) | type QueueListProps = ComponentProps<typeof ScrollArea>;
type QueueSectionProps (line 197) | type QueueSectionProps = ComponentProps<typeof Collapsible>;
type QueueSectionTriggerProps (line 208) | type QueueSectionTriggerProps = ComponentProps<"button">;
type QueueSectionLabelProps (line 230) | type QueueSectionLabelProps = ComponentProps<"span"> & {
type QueueSectionContentProps (line 253) | type QueueSectionContentProps = ComponentProps<
type QueueProps (line 264) | type QueueProps = ComponentProps<"div">;
FILE: frontend/src/components/ai-elements/reasoning.tsx
type ReasoningContextValue (line 16) | type ReasoningContextValue = {
type ReasoningProps (line 33) | type ReasoningProps = ComponentProps<typeof Collapsible> & {
constant AUTO_CLOSE_DELAY (line 41) | const AUTO_CLOSE_DELAY = 1000;
constant MS_IN_S (line 42) | const MS_IN_S = 1000;
type ReasoningTriggerProps (line 114) | type ReasoningTriggerProps = ComponentProps<typeof CollapsibleTrigger> & {
type ReasoningContentProps (line 157) | type ReasoningContentProps = ComponentProps<
FILE: frontend/src/components/ai-elements/shimmer.tsx
type TextShimmerProps (line 13) | type TextShimmerProps = {
FILE: frontend/src/components/ai-elements/sources.tsx
type SourcesProps (line 12) | type SourcesProps = ComponentProps<"div">;
type SourcesTriggerProps (line 21) | type SourcesTriggerProps = ComponentProps<typeof CollapsibleTrigger> & {
type SourcesContentProps (line 44) | type SourcesContentProps = ComponentProps<typeof CollapsibleContent>;
type SourceProps (line 60) | type SourceProps = ComponentProps<"a">;
FILE: frontend/src/components/ai-elements/suggestion.tsx
constant STAGGER_DELAY_MS (line 10) | const STAGGER_DELAY_MS = 60;
constant STAGGER_DELAY_MS_OFFSET (line 11) | const STAGGER_DELAY_MS_OFFSET = 250;
type SuggestionsProps (line 13) | type SuggestionsProps = ComponentProps<typeof ScrollArea>;
type SuggestionProps (line 41) | type SuggestionProps = Omit<ComponentProps<typeof Button>, "onClick"> & {
FILE: frontend/src/components/ai-elements/task.tsx
type TaskItemFileProps (line 12) | type TaskItemFileProps = ComponentProps<"div">;
type TaskItemProps (line 30) | type TaskItemProps = ComponentProps<"div">;
type TaskProps (line 38) | type TaskProps = ComponentProps<typeof Collapsible>;
type TaskTriggerProps (line 48) | type TaskTriggerProps = ComponentProps<typeof CollapsibleTrigger> & {
type TaskContentProps (line 69) | type TaskContentProps = ComponentProps<typeof CollapsibleContent>;
FILE: frontend/src/components/ai-elements/toolbar.tsx
type ToolbarProps (line 5) | type ToolbarProps = ComponentProps<typeof NodeToolbar>;
FILE: frontend/src/components/ai-elements/web-preview.tsx
type WebPreviewContextValue (line 21) | type WebPreviewContextValue = {
type WebPreviewProps (line 38) | type WebPreviewProps = ComponentProps<"div"> & {
type WebPreviewNavigationProps (line 80) | type WebPreviewNavigationProps = ComponentProps<"div">;
type WebPreviewNavigationButtonProps (line 95) | type WebPreviewNavigationButtonProps = ComponentProps<typeof Button> & {
type WebPreviewUrlProps (line 127) | type WebPreviewUrlProps = ComponentProps<typeof Input>;
type WebPreviewBodyProps (line 168) | type WebPreviewBodyProps = ComponentProps<"iframe"> & {
type WebPreviewConsoleProps (line 194) | type WebPreviewConsoleProps = ComponentProps<"div"> & {
FILE: frontend/src/components/landing/footer.tsx
function Footer (line 3) | function Footer() {
FILE: frontend/src/components/landing/header.tsx
function Header (line 7) | function Header() {
function StarCounter (line 42) | async function StarCounter() {
FILE: frontend/src/components/landing/hero.tsx
function Hero (line 12) | function Hero({ className }: { className?: string }) {
FILE: frontend/src/components/landing/progressive-skills-animation.tsx
type AnimationPhase (line 19) | type AnimationPhase =
type FileItem (line 32) | interface FileItem {
constant ANIMATION_DELAYS (line 51) | const ANIMATION_DELAYS = {
function ProgressiveSkillsAnimation (line 64) | function ProgressiveSkillsAnimation() {
FILE: frontend/src/components/landing/section.tsx
function Section (line 3) | function Section({
FILE: frontend/src/components/landing/sections/case-study-section.tsx
function CaseStudySection (line 9) | function CaseStudySection({ className }: { className?: string }) {
FILE: frontend/src/components/landing/sections/community-section.tsx
function CommunitySection (line 11) | function CommunitySection() {
FILE: frontend/src/components/landing/sections/sandbox-section.tsx
function SandboxSection (line 11) | function SandboxSection({ className }: { className?: string }) {
FILE: frontend/src/components/landing/sections/skills-section.tsx
function SkillsSection (line 8) | function SkillsSection({ className }: { className?: string }) {
FILE: frontend/src/components/landing/sections/whats-new-section.tsx
constant COLOR (line 8) | const COLOR = "#0a0a0a";
function WhatsNewSection (line 51) | function WhatsNewSection({ className }: { className?: string }) {
FILE: frontend/src/components/theme-provider.tsx
function ThemeProvider (line 6) | function ThemeProvider({
FILE: frontend/src/components/ui/alert.tsx
function Alert (line 22) | function Alert({
function AlertTitle (line 37) | function AlertTitle({ className, ...props }: React.ComponentProps<"div">) {
function AlertDescription (line 50) | function AlertDescription({
FILE: frontend/src/components/ui/aurora-text.tsx
type AuroraTextProps
Condensed preview — 603 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (4,900K chars).
[
{
"path": ".dockerignore",
"chars": 716,
"preview": ".env\nDockerfile\n.dockerignore\n.git\n.gitignore\ndocker/\n\n# Python\n__pycache__/\n*.py[cod]\n*$py.class\n*.so\n.Python\nenv/\nbuil"
},
{
"path": ".gitattributes",
"chars": 726,
"preview": "# Normalize line endings to LF for all text files\n* text=auto eol=lf\n\n# Shell scripts and makefiles must always use LF\n*"
},
{
"path": ".github/ISSUE_TEMPLATE/runtime-information.yml",
"chars": 2880,
"preview": "name: Runtime Information\ndescription: Report runtime/environment details to help reproduce an issue.\ntitle: \"[runtime] "
},
{
"path": ".github/copilot-instructions.md",
"chars": 6888,
"preview": "# Copilot Onboarding Instructions for DeerFlow\n\nUse this file as the default operating guide for this repository. Follow"
},
{
"path": ".github/workflows/backend-unit-tests.yml",
"chars": 918,
"preview": "name: Unit Tests\n\non:\n push:\n branches: [ 'main' ]\n pull_request:\n types: [opened, synchronize, reopened, ready_"
},
{
"path": ".gitignore",
"chars": 652,
"preview": "# DeerFlow docker image cache\ndocker/.cache/\n# OS generated files\n.DS_Store\n*.local\n._*\n.Spotlight-V100\n.Trashes\nehthumb"
},
{
"path": "CONTRIBUTING.md",
"chars": 7300,
"preview": "# Contributing to DeerFlow\n\nThank you for your interest in contributing to DeerFlow! This guide will help you set up you"
},
{
"path": "LICENSE",
"chars": 1134,
"preview": "MIT License\n\nCopyright (c) 2025 Bytedance Ltd. and/or its affiliates\nCopyright (c) 2025-2026 DeerFlow Authors\n\nPermissio"
},
{
"path": "Makefile",
"chars": 5748,
"preview": "# DeerFlow - Unified Development Environment\n\n.PHONY: help config config-upgrade check install dev dev-daemon start stop"
},
{
"path": "README.md",
"chars": 24293,
"preview": "# 🦌 DeerFlow - 2.0\n\nEnglish | [中文](./README_zh.md) | [日本語](./README_ja.md)\n\n[ | [中文](./README_zh.md) | 日本語\n\n[ | 中文 | [日本語](./README_ja.md)\n\n[ when working with code in this repository.\n\n## "
},
{
"path": "backend/CONTRIBUTING.md",
"chars": 9655,
"preview": "# Contributing to DeerFlow Backend\n\nThank you for your interest in contributing to DeerFlow! This document provides guid"
},
{
"path": "backend/Dockerfile",
"chars": 1421,
"preview": "# Backend Development Dockerfile\nFROM python:3.12-slim\n\nARG NODE_MAJOR=22\n\n# Install system dependencies + Node.js (prov"
},
{
"path": "backend/Makefile",
"chars": 298,
"preview": "install:\n\tuv sync\n\ndev:\n\tuv run langgraph dev --no-browser --allow-blocking --no-reload\n\ngateway:\n\tPYTHONPATH=. uv run u"
},
{
"path": "backend/README.md",
"chars": 13189,
"preview": "# DeerFlow Backend\n\nDeerFlow is a LangGraph-based AI super agent with sandbox execution, persistent memory, and extensib"
},
{
"path": "backend/app/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "backend/app/channels/__init__.py",
"chars": 496,
"preview": "\"\"\"IM Channel integration for DeerFlow.\n\nProvides a pluggable channel system that connects external messaging platforms\n"
},
{
"path": "backend/app/channels/base.py",
"chars": 3842,
"preview": "\"\"\"Abstract base class for IM channels.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nfrom abc import ABC, abst"
},
{
"path": "backend/app/channels/feishu.py",
"chars": 24994,
"preview": "\"\"\"Feishu/Lark channel — connects to Feishu via WebSocket (no public IP needed).\"\"\"\n\nfrom __future__ import annotations\n"
},
{
"path": "backend/app/channels/manager.py",
"chars": 28579,
"preview": "\"\"\"ChannelManager — consumes inbound messages and dispatches them to the DeerFlow agent via LangGraph Server.\"\"\"\n\nfrom _"
},
{
"path": "backend/app/channels/message_bus.py",
"chars": 6227,
"preview": "\"\"\"MessageBus — async pub/sub hub that decouples channels from the agent dispatcher.\"\"\"\n\nfrom __future__ import annotati"
},
{
"path": "backend/app/channels/service.py",
"chars": 6437,
"preview": "\"\"\"ChannelService — manages the lifecycle of all IM channels.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nfro"
},
{
"path": "backend/app/channels/slack.py",
"chars": 9092,
"preview": "\"\"\"Slack channel — connects via Socket Mode (no public IP needed).\"\"\"\n\nfrom __future__ import annotations\n\nimport asynci"
},
{
"path": "backend/app/channels/store.py",
"chars": 5458,
"preview": "\"\"\"ChannelStore — persists IM chat-to-DeerFlow thread mappings.\"\"\"\n\nfrom __future__ import annotations\n\nimport json\nimpo"
},
{
"path": "backend/app/channels/telegram.py",
"chars": 13049,
"preview": "\"\"\"Telegram channel — connects via long-polling (no public IP needed).\"\"\"\n\nfrom __future__ import annotations\n\nimport as"
},
{
"path": "backend/app/gateway/__init__.py",
"chars": 159,
"preview": "from .app import app, create_app\nfrom .config import GatewayConfig, get_gateway_config\n\n__all__ = [\"app\", \"create_app\", "
},
{
"path": "backend/app/gateway/app.py",
"chars": 6103,
"preview": "import logging\nfrom collections.abc import AsyncGenerator\nfrom contextlib import asynccontextmanager\n\nfrom fastapi impor"
},
{
"path": "backend/app/gateway/config.py",
"chars": 965,
"preview": "import os\n\nfrom pydantic import BaseModel, Field\n\n\nclass GatewayConfig(BaseModel):\n \"\"\"Configuration for the API Gate"
},
{
"path": "backend/app/gateway/path_utils.py",
"chars": 909,
"preview": "\"\"\"Shared path resolution for thread virtual paths (e.g. mnt/user-data/outputs/...).\"\"\"\n\nfrom pathlib import Path\n\nfrom "
},
{
"path": "backend/app/gateway/routers/__init__.py",
"chars": 145,
"preview": "from . import artifacts, mcp, models, skills, suggestions, uploads\n\n__all__ = [\"artifacts\", \"mcp\", \"models\", \"skills\", \""
},
{
"path": "backend/app/gateway/routers/agents.py",
"chars": 13069,
"preview": "\"\"\"CRUD API for custom agents.\"\"\"\n\nimport logging\nimport re\nimport shutil\n\nimport yaml\nfrom fastapi import APIRouter, HT"
},
{
"path": "backend/app/gateway/routers/artifacts.py",
"chars": 6654,
"preview": "import logging\nimport mimetypes\nimport zipfile\nfrom pathlib import Path\nfrom urllib.parse import quote\n\nfrom fastapi imp"
},
{
"path": "backend/app/gateway/routers/channels.py",
"chars": 1665,
"preview": "\"\"\"Gateway router for IM channel management.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\n\nfrom fastapi import"
},
{
"path": "backend/app/gateway/routers/mcp.py",
"chars": 7367,
"preview": "import json\nimport logging\nfrom pathlib import Path\nfrom typing import Literal\n\nfrom fastapi import APIRouter, HTTPExcep"
},
{
"path": "backend/app/gateway/routers/memory.py",
"chars": 7119,
"preview": "\"\"\"Memory API router for retrieving and managing global memory data.\"\"\"\n\nfrom fastapi import APIRouter\nfrom pydantic imp"
},
{
"path": "backend/app/gateway/routers/models.py",
"chars": 3607,
"preview": "from fastapi import APIRouter, HTTPException\nfrom pydantic import BaseModel, Field\n\nfrom deerflow.config import get_app_"
},
{
"path": "backend/app/gateway/routers/skills.py",
"chars": 15457,
"preview": "import json\nimport logging\nimport shutil\nimport stat\nimport tempfile\nimport zipfile\nfrom pathlib import Path\n\nfrom fasta"
},
{
"path": "backend/app/gateway/routers/suggestions.py",
"chars": 4751,
"preview": "import json\nimport logging\n\nfrom fastapi import APIRouter\nfrom pydantic import BaseModel, Field\n\nfrom deerflow.models im"
},
{
"path": "backend/app/gateway/routers/uploads.py",
"chars": 7309,
"preview": "\"\"\"Upload router for handling file uploads.\"\"\"\n\nimport logging\nfrom pathlib import Path\n\nfrom fastapi import APIRouter, "
},
{
"path": "backend/debug.py",
"chars": 2432,
"preview": "#!/usr/bin/env python\n\"\"\"\nDebug script for lead_agent.\nRun this file directly in VS Code with breakpoints.\n\nRequirements"
},
{
"path": "backend/docs/API.md",
"chars": 11568,
"preview": "# API Reference\n\nThis document provides a complete reference for the DeerFlow backend APIs.\n\n## Overview\n\nDeerFlow backe"
},
{
"path": "backend/docs/APPLE_CONTAINER.md",
"chars": 6354,
"preview": "# Apple Container Support\n\nDeerFlow now supports Apple Container as the preferred container runtime on macOS, with autom"
},
{
"path": "backend/docs/ARCHITECTURE.md",
"chars": 20532,
"preview": "# Architecture Overview\n\nThis document provides a comprehensive overview of the DeerFlow backend architecture.\n\n## Syste"
},
{
"path": "backend/docs/AUTO_TITLE_GENERATION.md",
"chars": 6208,
"preview": "# 自动 Thread Title 生成功能\n\n## 功能说明\n\n自动为对话线程生成标题,在用户首次提问并收到回复后自动触发。\n\n## 实现方式\n\n使用 `TitleMiddleware` 在 `after_model` 钩子中:\n1. 检"
},
{
"path": "backend/docs/CONFIGURATION.md",
"chars": 10204,
"preview": "# Configuration Guide\n\nThis guide explains how to configure DeerFlow for your environment.\n\n## Config Versioning\n\n`confi"
},
{
"path": "backend/docs/FILE_UPLOAD.md",
"chars": 6076,
"preview": "# 文件上传功能\n\n## 概述\n\nDeerFlow 后端提供了完整的文件上传功能,支持多文件上传,并自动将 Office 文档和 PDF 转换为 Markdown 格式。\n\n## 功能特性\n\n- ✅ 支持多文件同时上传\n- ✅ 自动转换文档"
},
{
"path": "backend/docs/HARNESS_APP_SPLIT.md",
"chars": 9993,
"preview": "# DeerFlow 后端拆分设计文档:Harness + App\n\n> 状态:Draft\n> 作者:DeerFlow Team\n> 日期:2026-03-13\n\n## 1. 背景与动机\n\nDeerFlow 后端当前是一个单一 Python"
},
{
"path": "backend/docs/MCP_SERVER.md",
"chars": 2155,
"preview": "# MCP (Model Context Protocol) Configuration\n\nDeerFlow supports configurable MCP servers and skills to extend its capabi"
},
{
"path": "backend/docs/MEMORY_IMPROVEMENTS.md",
"chars": 1996,
"preview": "# Memory System Improvements\n\nThis document tracks memory injection behavior and roadmap status.\n\n## Status (As Of 2026-"
},
{
"path": "backend/docs/MEMORY_IMPROVEMENTS_SUMMARY.md",
"chars": 1298,
"preview": "# Memory System Improvements - Summary\n\n## Sync Note (2026-03-10)\n\nThis summary is synchronized with the `main` branch i"
},
{
"path": "backend/docs/PATH_EXAMPLES.md",
"chars": 6933,
"preview": "# 文件路径使用示例\n\n## 三种路径类型\n\nDeerFlow 的文件上传系统返回三种不同的路径,每种路径用于不同的场景:\n\n### 1. 实际文件系统路径 (path)\n\n```\n.deer-flow/threads/{thread_id"
},
{
"path": "backend/docs/README.md",
"chars": 1962,
"preview": "# Documentation\n\nThis directory contains detailed documentation for the DeerFlow backend.\n\n## Quick Links\n\n| Document | "
},
{
"path": "backend/docs/SETUP.md",
"chars": 2741,
"preview": "# Setup Guide\n\nQuick setup instructions for DeerFlow.\n\n## Configuration Setup\n\nDeerFlow uses a YAML configuration file t"
},
{
"path": "backend/docs/TITLE_GENERATION_IMPLEMENTATION.md",
"chars": 4295,
"preview": "# 自动 Title 生成功能实现总结\n\n## ✅ 已完成的工作\n\n### 1. 核心实现文件\n\n#### [`packages/harness/deerflow/agents/thread_state.py`](../packages/h"
},
{
"path": "backend/docs/TODO.md",
"chars": 1787,
"preview": "# TODO List\n\n## Completed Features\n\n- [x] Launch the sandbox only after the first file system or bash tool is called\n- ["
},
{
"path": "backend/docs/plan_mode_usage.md",
"chars": 7316,
"preview": "# Plan Mode with TodoList Middleware\n\nThis document describes how to enable and use the Plan Mode feature with TodoList "
},
{
"path": "backend/docs/summarization.md",
"chars": 10444,
"preview": "# Conversation Summarization\n\nDeerFlow includes automatic conversation summarization to handle long conversations that a"
},
{
"path": "backend/docs/task_tool_improvements.md",
"chars": 5173,
"preview": "# Task Tool Improvements\n\n## Overview\n\nThe task tool has been improved to eliminate wasteful LLM polling. Previously, wh"
},
{
"path": "backend/langgraph.json",
"chars": 322,
"preview": "{\n \"$schema\": \"https://langgra.ph/schema.json\",\n \"python_version\": \"3.12\",\n \"dependencies\": [\n \".\"\n ],\n \"env\": \""
},
{
"path": "backend/packages/harness/deerflow/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "backend/packages/harness/deerflow/agents/__init__.py",
"chars": 299,
"preview": "from .checkpointer import get_checkpointer, make_checkpointer, reset_checkpointer\nfrom .lead_agent import make_lead_agen"
},
{
"path": "backend/packages/harness/deerflow/agents/checkpointer/__init__.py",
"chars": 245,
"preview": "from .async_provider import make_checkpointer\nfrom .provider import checkpointer_context, get_checkpointer, reset_checkp"
},
{
"path": "backend/packages/harness/deerflow/agents/checkpointer/async_provider.py",
"chars": 3523,
"preview": "\"\"\"Async checkpointer factory.\n\nProvides an **async context manager** for long-running async servers that need\nproper re"
},
{
"path": "backend/packages/harness/deerflow/agents/checkpointer/provider.py",
"chars": 7598,
"preview": "\"\"\"Sync checkpointer factory.\n\nProvides a **sync singleton** and a **sync context manager** for LangGraph\ngraph compilat"
},
{
"path": "backend/packages/harness/deerflow/agents/lead_agent/__init__.py",
"chars": 66,
"preview": "from .agent import make_lead_agent\n\n__all__ = [\"make_lead_agent\"]\n"
},
{
"path": "backend/packages/harness/deerflow/agents/lead_agent/agent.py",
"chars": 15224,
"preview": "import logging\n\nfrom langchain.agents import create_agent\nfrom langchain.agents.middleware import SummarizationMiddlewar"
},
{
"path": "backend/packages/harness/deerflow/agents/lead_agent/prompt.py",
"chars": 22428,
"preview": "from datetime import datetime\n\nfrom deerflow.config.agents_config import load_agent_soul\nfrom deerflow.skills import loa"
},
{
"path": "backend/packages/harness/deerflow/agents/memory/__init__.py",
"chars": 1148,
"preview": "\"\"\"Memory module for DeerFlow.\n\nThis module provides a global memory mechanism that:\n- Stores user context and conversat"
},
{
"path": "backend/packages/harness/deerflow/agents/memory/prompt.py",
"chars": 13723,
"preview": "\"\"\"Prompt templates for memory update and injection.\"\"\"\n\nimport math\nimport re\nfrom typing import Any\n\ntry:\n import t"
},
{
"path": "backend/packages/harness/deerflow/agents/memory/queue.py",
"chars": 5953,
"preview": "\"\"\"Memory update queue with debounce mechanism.\"\"\"\n\nimport threading\nimport time\nfrom dataclasses import dataclass, fiel"
},
{
"path": "backend/packages/harness/deerflow/agents/memory/updater.py",
"chars": 15738,
"preview": "\"\"\"Memory updater for reading, writing, and updating memory data.\"\"\"\n\nimport json\nimport logging\nimport re\nimport uuid\nf"
},
{
"path": "backend/packages/harness/deerflow/agents/middlewares/clarification_middleware.py",
"chars": 6065,
"preview": "\"\"\"Middleware for intercepting clarification requests and presenting them to the user.\"\"\"\n\nfrom collections.abc import C"
},
{
"path": "backend/packages/harness/deerflow/agents/middlewares/dangling_tool_call_middleware.py",
"chars": 4469,
"preview": "\"\"\"Middleware to fix dangling tool calls in message history.\n\nA dangling tool call occurs when an AIMessage contains too"
},
{
"path": "backend/packages/harness/deerflow/agents/middlewares/deferred_tool_filter_middleware.py",
"chars": 2290,
"preview": "\"\"\"Middleware to filter deferred tool schemas from model binding.\n\nWhen tool_search is enabled, MCP tools are registered"
},
{
"path": "backend/packages/harness/deerflow/agents/middlewares/loop_detection_middleware.py",
"chars": 8409,
"preview": "\"\"\"Middleware to detect and break repetitive tool call loops.\n\nP0 safety: prevents the agent from calling the same tool "
},
{
"path": "backend/packages/harness/deerflow/agents/middlewares/memory_middleware.py",
"chars": 5663,
"preview": "\"\"\"Middleware for memory mechanism.\"\"\"\n\nimport re\nfrom typing import Any, override\n\nfrom langchain.agents import AgentSt"
},
{
"path": "backend/packages/harness/deerflow/agents/middlewares/subagent_limit_middleware.py",
"chars": 2839,
"preview": "\"\"\"Middleware to enforce maximum concurrent subagent tool calls per model response.\"\"\"\n\nimport logging\nfrom typing impor"
},
{
"path": "backend/packages/harness/deerflow/agents/middlewares/thread_data_middleware.py",
"chars": 3485,
"preview": "from typing import NotRequired, override\n\nfrom langchain.agents import AgentState\nfrom langchain.agents.middleware impor"
},
{
"path": "backend/packages/harness/deerflow/agents/middlewares/title_middleware.py",
"chars": 5625,
"preview": "\"\"\"Middleware for automatic thread title generation.\"\"\"\n\nimport logging\nfrom typing import NotRequired, override\n\nfrom l"
},
{
"path": "backend/packages/harness/deerflow/agents/middlewares/todo_middleware.py",
"chars": 3868,
"preview": "\"\"\"Middleware that extends TodoListMiddleware with context-loss detection.\n\nWhen the message history is truncated (e.g.,"
},
{
"path": "backend/packages/harness/deerflow/agents/middlewares/tool_error_handling_middleware.py",
"chars": 4223,
"preview": "\"\"\"Tool error handling middleware and shared runtime middleware builders.\"\"\"\n\nimport logging\nfrom collections.abc import"
},
{
"path": "backend/packages/harness/deerflow/agents/middlewares/uploads_middleware.py",
"chars": 8182,
"preview": "\"\"\"Middleware to inject uploaded files information into agent context.\"\"\"\n\nimport logging\nfrom pathlib import Path\nfrom "
},
{
"path": "backend/packages/harness/deerflow/agents/middlewares/view_image_middleware.py",
"chars": 8584,
"preview": "\"\"\"Middleware for injecting image details into conversation before LLM call.\"\"\"\n\nfrom typing import NotRequired, overrid"
},
{
"path": "backend/packages/harness/deerflow/agents/thread_state.py",
"chars": 1862,
"preview": "from typing import Annotated, NotRequired, TypedDict\n\nfrom langchain.agents import AgentState\n\n\nclass SandboxState(Typed"
},
{
"path": "backend/packages/harness/deerflow/client.py",
"chars": 37714,
"preview": "\"\"\"DeerFlowClient — Embedded Python client for DeerFlow agent system.\n\nProvides direct programmatic access to DeerFlow's"
},
{
"path": "backend/packages/harness/deerflow/community/aio_sandbox/__init__.py",
"chars": 418,
"preview": "from .aio_sandbox import AioSandbox\nfrom .aio_sandbox_provider import AioSandboxProvider\nfrom .backend import SandboxBac"
},
{
"path": "backend/packages/harness/deerflow/community/aio_sandbox/aio_sandbox.py",
"chars": 4622,
"preview": "import base64\nimport logging\n\nfrom agent_sandbox import Sandbox as AioSandboxClient\n\nfrom deerflow.sandbox.sandbox impor"
},
{
"path": "backend/packages/harness/deerflow/community/aio_sandbox/aio_sandbox_provider.py",
"chars": 27956,
"preview": "\"\"\"AIO Sandbox Provider — orchestrates sandbox lifecycle with pluggable backends.\n\nThis provider composes:\n- SandboxBack"
},
{
"path": "backend/packages/harness/deerflow/community/aio_sandbox/backend.py",
"chars": 3019,
"preview": "\"\"\"Abstract base class for sandbox provisioning backends.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport "
},
{
"path": "backend/packages/harness/deerflow/community/aio_sandbox/local_backend.py",
"chars": 12614,
"preview": "\"\"\"Local container backend for sandbox provisioning.\n\nManages sandbox containers using Docker or Apple Container on the "
},
{
"path": "backend/packages/harness/deerflow/community/aio_sandbox/remote_backend.py",
"chars": 5917,
"preview": "\"\"\"Remote sandbox backend — delegates Pod lifecycle to the provisioner service.\n\nThe provisioner dynamically creates per"
},
{
"path": "backend/packages/harness/deerflow/community/aio_sandbox/sandbox_info.py",
"chars": 1452,
"preview": "\"\"\"Sandbox metadata for cross-process discovery and state persistence.\"\"\"\n\nfrom __future__ import annotations\n\nimport ti"
},
{
"path": "backend/packages/harness/deerflow/community/firecrawl/tools.py",
"chars": 2521,
"preview": "import json\n\nfrom firecrawl import FirecrawlApp\nfrom langchain.tools import tool\n\nfrom deerflow.config import get_app_co"
},
{
"path": "backend/packages/harness/deerflow/community/image_search/__init__.py",
"chars": 70,
"preview": "from .tools import image_search_tool\n\n__all__ = [\"image_search_tool\"]\n"
},
{
"path": "backend/packages/harness/deerflow/community/image_search/tools.py",
"chars": 4463,
"preview": "\"\"\"\nImage Search Tool - Search images using DuckDuckGo for reference in image generation.\n\"\"\"\n\nimport json\nimport loggin"
},
{
"path": "backend/packages/harness/deerflow/community/infoquest/infoquest_client.py",
"chars": 18970,
"preview": "\"\"\"Util that calls InfoQuest Search And Fetch API.\n\nIn order to set this up, follow instructions at:\nhttps://docs.bytepl"
},
{
"path": "backend/packages/harness/deerflow/community/infoquest/tools.py",
"chars": 3941,
"preview": "from langchain.tools import tool\n\nfrom deerflow.config import get_app_config\nfrom deerflow.utils.readability import Read"
},
{
"path": "backend/packages/harness/deerflow/community/jina_ai/jina_client.py",
"chars": 1442,
"preview": "import logging\nimport os\n\nimport requests\n\nlogger = logging.getLogger(__name__)\n\n\nclass JinaClient:\n def crawl(self, "
},
{
"path": "backend/packages/harness/deerflow/community/jina_ai/tools.py",
"chars": 1284,
"preview": "from langchain.tools import tool\n\nfrom deerflow.community.jina_ai.jina_client import JinaClient\nfrom deerflow.config imp"
},
{
"path": "backend/packages/harness/deerflow/community/tavily/tools.py",
"chars": 2208,
"preview": "import json\n\nfrom langchain.tools import tool\nfrom tavily import TavilyClient\n\nfrom deerflow.config import get_app_confi"
},
{
"path": "backend/packages/harness/deerflow/config/__init__.py",
"chars": 549,
"preview": "from .app_config import get_app_config\nfrom .extensions_config import ExtensionsConfig, get_extensions_config\nfrom .memo"
},
{
"path": "backend/packages/harness/deerflow/config/agents_config.py",
"chars": 3512,
"preview": "\"\"\"Configuration and loaders for custom agents.\"\"\"\n\nimport logging\nimport re\nfrom typing import Any\n\nimport yaml\nfrom py"
},
{
"path": "backend/packages/harness/deerflow/config/app_config.py",
"chars": 12660,
"preview": "import logging\nimport os\nfrom pathlib import Path\nfrom typing import Any, Self\n\nimport yaml\nfrom dotenv import load_dote"
},
{
"path": "backend/packages/harness/deerflow/config/checkpointer_config.py",
"chars": 1713,
"preview": "\"\"\"Configuration for LangGraph checkpointer.\"\"\"\n\nfrom typing import Literal\n\nfrom pydantic import BaseModel, Field\n\nChec"
},
{
"path": "backend/packages/harness/deerflow/config/extensions_config.py",
"chars": 10940,
"preview": "\"\"\"Unified extensions configuration for MCP servers and skills.\"\"\"\n\nimport json\nimport os\nfrom pathlib import Path\nfrom "
},
{
"path": "backend/packages/harness/deerflow/config/memory_config.py",
"chars": 2397,
"preview": "\"\"\"Configuration for memory mechanism.\"\"\"\n\nfrom pydantic import BaseModel, Field\n\n\nclass MemoryConfig(BaseModel):\n \"\""
},
{
"path": "backend/packages/harness/deerflow/config/model_config.py",
"chars": 1908,
"preview": "from pydantic import BaseModel, ConfigDict, Field\n\n\nclass ModelConfig(BaseModel):\n \"\"\"Config section for a model\"\"\"\n\n"
},
{
"path": "backend/packages/harness/deerflow/config/paths.py",
"chars": 8033,
"preview": "import os\nimport re\nfrom pathlib import Path\n\n# Virtual path prefix seen by agents inside the sandbox\nVIRTUAL_PATH_PREFI"
},
{
"path": "backend/packages/harness/deerflow/config/sandbox_config.py",
"chars": 2721,
"preview": "from pydantic import BaseModel, ConfigDict, Field\n\n\nclass VolumeMountConfig(BaseModel):\n \"\"\"Configuration for a volum"
},
{
"path": "backend/packages/harness/deerflow/config/skills_config.py",
"chars": 1573,
"preview": "from pathlib import Path\n\nfrom pydantic import BaseModel, Field\n\n\nclass SkillsConfig(BaseModel):\n \"\"\"Configuration fo"
},
{
"path": "backend/packages/harness/deerflow/config/subagents_config.py",
"chars": 2230,
"preview": "\"\"\"Configuration for the subagent system loaded from config.yaml.\"\"\"\n\nimport logging\n\nfrom pydantic import BaseModel, Fi"
},
{
"path": "backend/packages/harness/deerflow/config/summarization_config.py",
"chars": 2963,
"preview": "\"\"\"Configuration for conversation summarization.\"\"\"\n\nfrom typing import Literal\n\nfrom pydantic import BaseModel, Field\n\n"
},
{
"path": "backend/packages/harness/deerflow/config/title_config.py",
"chars": 1596,
"preview": "\"\"\"Configuration for automatic thread title generation.\"\"\"\n\nfrom pydantic import BaseModel, Field\n\n\nclass TitleConfig(Ba"
},
{
"path": "backend/packages/harness/deerflow/config/tool_config.py",
"chars": 632,
"preview": "from pydantic import BaseModel, ConfigDict, Field\n\n\nclass ToolGroupConfig(BaseModel):\n \"\"\"Config section for a tool g"
},
{
"path": "backend/packages/harness/deerflow/config/tool_search_config.py",
"chars": 1126,
"preview": "\"\"\"Configuration for deferred tool loading via tool_search.\"\"\"\n\nfrom pydantic import BaseModel, Field\n\n\nclass ToolSearch"
},
{
"path": "backend/packages/harness/deerflow/config/tracing_config.py",
"chars": 3441,
"preview": "import logging\nimport os\nimport threading\n\nfrom pydantic import BaseModel, Field\n\nlogger = logging.getLogger(__name__)\n_"
},
{
"path": "backend/packages/harness/deerflow/mcp/__init__.py",
"chars": 434,
"preview": "\"\"\"MCP (Model Context Protocol) integration using langchain-mcp-adapters.\"\"\"\n\nfrom .cache import get_cached_mcp_tools, i"
},
{
"path": "backend/packages/harness/deerflow/mcp/cache.py",
"chars": 4601,
"preview": "\"\"\"Cache for MCP tools to avoid repeated loading.\"\"\"\n\nimport asyncio\nimport logging\nimport os\n\nfrom langchain_core.tools"
},
{
"path": "backend/packages/harness/deerflow/mcp/client.py",
"chars": 2359,
"preview": "\"\"\"MCP client using langchain-mcp-adapters.\"\"\"\n\nimport logging\nfrom typing import Any\n\nfrom deerflow.config.extensions_c"
},
{
"path": "backend/packages/harness/deerflow/mcp/oauth.py",
"chars": 5905,
"preview": "\"\"\"OAuth token support for MCP HTTP/SSE servers.\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport logging\nf"
},
{
"path": "backend/packages/harness/deerflow/mcp/tools.py",
"chars": 2690,
"preview": "\"\"\"Load MCP tools using langchain-mcp-adapters.\"\"\"\n\nimport logging\n\nfrom langchain_core.tools import BaseTool\n\nfrom deer"
},
{
"path": "backend/packages/harness/deerflow/models/__init__.py",
"chars": 72,
"preview": "from .factory import create_chat_model\n\n__all__ = [\"create_chat_model\"]\n"
},
{
"path": "backend/packages/harness/deerflow/models/claude_provider.py",
"chars": 10406,
"preview": "\"\"\"Custom Claude provider with OAuth Bearer auth, prompt caching, and smart thinking.\n\nSupports two authentication modes"
},
{
"path": "backend/packages/harness/deerflow/models/credential_loader.py",
"chars": 7043,
"preview": "\"\"\"Auto-load credentials from Claude Code CLI and Codex CLI.\n\nImplements two credential strategies:\n 1. Claude Code OAu"
},
{
"path": "backend/packages/harness/deerflow/models/factory.py",
"chars": 4503,
"preview": "import logging\n\nfrom langchain.chat_models import BaseChatModel\n\nfrom deerflow.config import get_app_config, get_tracing"
},
{
"path": "backend/packages/harness/deerflow/models/openai_codex_provider.py",
"chars": 15518,
"preview": "\"\"\"Custom OpenAI Codex provider using ChatGPT Codex Responses API.\n\nUses Codex CLI OAuth tokens with chatgpt.com/backend"
},
{
"path": "backend/packages/harness/deerflow/models/patched_deepseek.py",
"chars": 3006,
"preview": "\"\"\"Patched ChatDeepSeek that preserves reasoning_content in multi-turn conversations.\n\nThis module provides a patched ve"
},
{
"path": "backend/packages/harness/deerflow/models/patched_minimax.py",
"chars": 8299,
"preview": "\"\"\"Patched ChatOpenAI adapter for MiniMax reasoning output.\n\nMiniMax's OpenAI-compatible chat completions API can return"
},
{
"path": "backend/packages/harness/deerflow/reflection/__init__.py",
"chars": 104,
"preview": "from .resolvers import resolve_class, resolve_variable\n\n__all__ = [\"resolve_class\", \"resolve_variable\"]\n"
},
{
"path": "backend/packages/harness/deerflow/reflection/resolvers.py",
"chars": 4084,
"preview": "from importlib import import_module\n\nMODULE_TO_PACKAGE_HINTS = {\n \"langchain_google_genai\": \"langchain-google-genai\","
},
{
"path": "backend/packages/harness/deerflow/sandbox/__init__.py",
"chars": 178,
"preview": "from .sandbox import Sandbox\nfrom .sandbox_provider import SandboxProvider, get_sandbox_provider\n\n__all__ = [\n \"Sandb"
},
{
"path": "backend/packages/harness/deerflow/sandbox/exceptions.py",
"chars": 2235,
"preview": "\"\"\"Sandbox-related exceptions with structured error information.\"\"\"\n\n\nclass SandboxError(Exception):\n \"\"\"Base excepti"
},
{
"path": "backend/packages/harness/deerflow/sandbox/local/__init__.py",
"chars": 93,
"preview": "from .local_sandbox_provider import LocalSandboxProvider\n\n__all__ = [\"LocalSandboxProvider\"]\n"
},
{
"path": "backend/packages/harness/deerflow/sandbox/local/list_dir.py",
"chars": 2475,
"preview": "import fnmatch\nfrom pathlib import Path\n\nIGNORE_PATTERNS = [\n # Version Control\n \".git\",\n \".svn\",\n \".hg\",\n "
},
{
"path": "backend/packages/harness/deerflow/sandbox/local/local_sandbox.py",
"chars": 2379,
"preview": "import os\nimport shutil\nimport subprocess\n\nfrom deerflow.sandbox.local.list_dir import list_dir\nfrom deerflow.sandbox.sa"
},
{
"path": "backend/packages/harness/deerflow/sandbox/local/local_sandbox_provider.py",
"chars": 1070,
"preview": "from deerflow.sandbox.local.local_sandbox import LocalSandbox\nfrom deerflow.sandbox.sandbox import Sandbox\nfrom deerflow"
},
{
"path": "backend/packages/harness/deerflow/sandbox/middleware.py",
"chars": 3150,
"preview": "import logging\nfrom typing import NotRequired, override\n\nfrom langchain.agents import AgentState\nfrom langchain.agents.m"
},
{
"path": "backend/packages/harness/deerflow/sandbox/sandbox.py",
"chars": 1840,
"preview": "from abc import ABC, abstractmethod\n\n\nclass Sandbox(ABC):\n \"\"\"Abstract base class for sandbox environments\"\"\"\n\n _i"
},
{
"path": "backend/packages/harness/deerflow/sandbox/sandbox_provider.py",
"chars": 2959,
"preview": "from abc import ABC, abstractmethod\n\nfrom deerflow.config import get_app_config\nfrom deerflow.reflection import resolve_"
},
{
"path": "backend/packages/harness/deerflow/sandbox/tools.py",
"chars": 28321,
"preview": "import re\nfrom pathlib import Path\n\nfrom langchain.tools import ToolRuntime, tool\nfrom langgraph.typing import ContextT\n"
},
{
"path": "backend/packages/harness/deerflow/skills/__init__.py",
"chars": 288,
"preview": "from .loader import get_skills_root_path, load_skills\nfrom .types import Skill\nfrom .validation import ALLOWED_FRONTMATT"
},
{
"path": "backend/packages/harness/deerflow/skills/loader.py",
"chars": 3687,
"preview": "import os\nfrom pathlib import Path\n\nfrom .parser import parse_skill_file\nfrom .types import Skill\n\n\ndef get_skills_root_"
},
{
"path": "backend/packages/harness/deerflow/skills/parser.py",
"chars": 1929,
"preview": "import re\nfrom pathlib import Path\n\nfrom .types import Skill\n\n\ndef parse_skill_file(skill_file: Path, category: str, rel"
},
{
"path": "backend/packages/harness/deerflow/skills/types.py",
"chars": 1801,
"preview": "from dataclasses import dataclass\nfrom pathlib import Path\n\n\n@dataclass\nclass Skill:\n \"\"\"Represents a skill with its "
},
{
"path": "backend/packages/harness/deerflow/skills/validation.py",
"chars": 3307,
"preview": "\"\"\"Skill frontmatter validation utilities.\n\nPure-logic validation of SKILL.md frontmatter — no FastAPI or HTTP dependenc"
},
{
"path": "backend/packages/harness/deerflow/subagents/__init__.py",
"chars": 280,
"preview": "from .config import SubagentConfig\nfrom .executor import SubagentExecutor, SubagentResult\nfrom .registry import get_suba"
},
{
"path": "backend/packages/harness/deerflow/subagents/builtins/__init__.py",
"chars": 341,
"preview": "\"\"\"Built-in subagent configurations.\"\"\"\n\nfrom .bash_agent import BASH_AGENT_CONFIG\nfrom .general_purpose import GENERAL_"
},
{
"path": "backend/packages/harness/deerflow/subagents/builtins/bash_agent.py",
"chars": 1662,
"preview": "\"\"\"Bash command execution subagent configuration.\"\"\"\n\nfrom deerflow.subagents.config import SubagentConfig\n\nBASH_AGENT_C"
},
{
"path": "backend/packages/harness/deerflow/subagents/builtins/general_purpose.py",
"chars": 1829,
"preview": "\"\"\"General-purpose subagent configuration.\"\"\"\n\nfrom deerflow.subagents.config import SubagentConfig\n\nGENERAL_PURPOSE_CON"
},
{
"path": "backend/packages/harness/deerflow/subagents/config.py",
"chars": 993,
"preview": "\"\"\"Subagent configuration definitions.\"\"\"\n\nfrom dataclasses import dataclass, field\n\n\n@dataclass\nclass SubagentConfig:\n "
},
{
"path": "backend/packages/harness/deerflow/subagents/executor.py",
"chars": 21452,
"preview": "\"\"\"Subagent execution engine.\"\"\"\n\nimport asyncio\nimport logging\nimport threading\nimport uuid\nfrom concurrent.futures imp"
},
{
"path": "backend/packages/harness/deerflow/subagents/registry.py",
"chars": 1638,
"preview": "\"\"\"Subagent registry for managing available subagents.\"\"\"\n\nimport logging\nfrom dataclasses import replace\n\nfrom deerflow"
},
{
"path": "backend/packages/harness/deerflow/tools/__init__.py",
"chars": 74,
"preview": "from .tools import get_available_tools\n\n__all__ = [\"get_available_tools\"]\n"
},
{
"path": "backend/packages/harness/deerflow/tools/builtins/__init__.py",
"chars": 353,
"preview": "from .clarification_tool import ask_clarification_tool\nfrom .present_file_tool import present_file_tool\nfrom .setup_agen"
},
{
"path": "backend/packages/harness/deerflow/tools/builtins/clarification_tool.py",
"chars": 2638,
"preview": "from typing import Literal\n\nfrom langchain.tools import tool\n\n\n@tool(\"ask_clarification\", parse_docstring=True, return_d"
},
{
"path": "backend/packages/harness/deerflow/tools/builtins/present_file_tool.py",
"chars": 3791,
"preview": "from pathlib import Path\nfrom typing import Annotated\n\nfrom langchain.tools import InjectedToolCallId, ToolRuntime, tool"
},
{
"path": "backend/packages/harness/deerflow/tools/builtins/setup_agent_tool.py",
"chars": 2176,
"preview": "import logging\n\nimport yaml\nfrom langchain_core.messages import ToolMessage\nfrom langchain_core.tools import tool\nfrom l"
},
{
"path": "backend/packages/harness/deerflow/tools/builtins/task_tool.py",
"chars": 8786,
"preview": "\"\"\"Task tool for delegating work to subagents.\"\"\"\n\nimport logging\nimport time\nimport uuid\nfrom dataclasses import replac"
},
{
"path": "backend/packages/harness/deerflow/tools/builtins/tool_search.py",
"chars": 5438,
"preview": "\"\"\"Tool search — deferred tool discovery at runtime.\n\nContains:\n- DeferredToolRegistry: stores deferred tools and handle"
},
{
"path": "backend/packages/harness/deerflow/tools/builtins/view_image_tool.py",
"chars": 3550,
"preview": "import base64\nimport mimetypes\nfrom pathlib import Path\nfrom typing import Annotated\n\nfrom langchain.tools import Inject"
},
{
"path": "backend/packages/harness/deerflow/tools/tools.py",
"chars": 4346,
"preview": "import logging\n\nfrom langchain.tools import BaseTool\n\nfrom deerflow.config import get_app_config\nfrom deerflow.reflectio"
},
{
"path": "backend/packages/harness/deerflow/utils/file_conversion.py",
"chars": 1210,
"preview": "\"\"\"File conversion utilities.\n\nConverts document files (PDF, PPT, Excel, Word) to Markdown using markitdown.\nNo FastAPI "
},
{
"path": "backend/packages/harness/deerflow/utils/network.py",
"chars": 4448,
"preview": "\"\"\"Thread-safe network utilities.\"\"\"\n\nimport socket\nimport threading\nfrom contextlib import contextmanager\n\n\nclass PortA"
},
{
"path": "backend/packages/harness/deerflow/utils/readability.py",
"chars": 2860,
"preview": "import logging\nimport re\nimport subprocess\nfrom urllib.parse import urljoin\n\nfrom markdownify import markdownify as md\nf"
},
{
"path": "backend/packages/harness/pyproject.toml",
"chars": 1028,
"preview": "[project]\nname = \"deerflow-harness\"\nversion = \"0.1.0\"\ndescription = \"DeerFlow agent harness framework\"\nrequires-python ="
},
{
"path": "backend/pyproject.toml",
"chars": 677,
"preview": "[project]\nname = \"deer-flow\"\nversion = \"0.1.0\"\ndescription = \"LangGraph-based AI agent system with sandbox execution cap"
},
{
"path": "backend/ruff.toml",
"chars": 204,
"preview": "line-length = 240\ntarget-version = \"py312\"\n\n[lint]\nselect = [\"E\", \"F\", \"I\", \"UP\"]\nignore = []\n\n[lint.isort]\nknown-first-"
},
{
"path": "backend/tests/conftest.py",
"chars": 1290,
"preview": "\"\"\"Test configuration for the backend test suite.\n\nSets up sys.path and pre-mocks modules that would cause circular impo"
},
{
"path": "backend/tests/test_app_config_reload.py",
"chars": 2755,
"preview": "from __future__ import annotations\n\nimport json\nimport os\nfrom pathlib import Path\n\nimport yaml\n\nfrom deerflow.config.ap"
},
{
"path": "backend/tests/test_artifacts_router.py",
"chars": 1085,
"preview": "import asyncio\nfrom pathlib import Path\n\nfrom starlette.requests import Request\n\nimport app.gateway.routers.artifacts as"
},
{
"path": "backend/tests/test_channel_file_attachments.py",
"chars": 15413,
"preview": "\"\"\"Tests for channel file attachment support (ResolvedAttachment, resolution, send_file).\"\"\"\n\nfrom __future__ import ann"
},
{
"path": "backend/tests/test_channels.py",
"chars": 73233,
"preview": "\"\"\"Tests for the IM channel system (MessageBus, ChannelStore, ChannelManager).\"\"\"\n\nfrom __future__ import annotations\n\ni"
},
{
"path": "backend/tests/test_checkpointer.py",
"chars": 10617,
"preview": "\"\"\"Unit tests for checkpointer config and singleton factory.\"\"\"\n\nimport sys\nfrom unittest.mock import MagicMock, patch\n\n"
},
{
"path": "backend/tests/test_checkpointer_none_fix.py",
"chars": 2518,
"preview": "\"\"\"Test for issue #1016: checkpointer should not return None.\"\"\"\n\nfrom unittest.mock import MagicMock, patch\n\nimport pyt"
},
{
"path": "backend/tests/test_cli_auth_providers.py",
"chars": 4797,
"preview": "from __future__ import annotations\n\nimport json\n\nimport pytest\nfrom langchain_core.messages import HumanMessage, SystemM"
},
{
"path": "backend/tests/test_client.py",
"chars": 69509,
"preview": "\"\"\"Tests for DeerFlowClient.\"\"\"\n\nimport asyncio\nimport concurrent.futures\nimport json\nimport tempfile\nimport zipfile\nfro"
},
{
"path": "backend/tests/test_client_live.py",
"chars": 13324,
"preview": "\"\"\"Live integration tests for DeerFlowClient with real API.\n\nThese tests require a working config.yaml with valid API cr"
},
{
"path": "backend/tests/test_config_version.py",
"chars": 4721,
"preview": "\"\"\"Tests for config version check and upgrade logic.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport tempf"
},
{
"path": "backend/tests/test_credential_loader.py",
"chars": 4852,
"preview": "import json\nimport os\n\nfrom deerflow.models.credential_loader import (\n load_claude_code_credential,\n load_codex_c"
},
{
"path": "backend/tests/test_custom_agent.py",
"chars": 21171,
"preview": "\"\"\"Tests for custom agent support.\"\"\"\n\nfrom __future__ import annotations\n\nfrom pathlib import Path\nfrom unittest.mock i"
},
{
"path": "backend/tests/test_docker_sandbox_mode_detection.py",
"chars": 2600,
"preview": "\"\"\"Regression tests for docker sandbox mode detection logic.\"\"\"\n\nfrom __future__ import annotations\n\nimport subprocess\ni"
},
{
"path": "backend/tests/test_feishu_parser.py",
"chars": 2637,
"preview": "import json\nfrom unittest.mock import MagicMock\n\nimport pytest\n\nfrom app.channels.feishu import FeishuChannel\nfrom app.c"
},
{
"path": "backend/tests/test_harness_boundary.py",
"chars": 1704,
"preview": "\"\"\"Boundary check: harness layer must not import from app layer.\n\nThe deerflow-harness package (packages/harness/deerflo"
},
{
"path": "backend/tests/test_infoquest_client.py",
"chars": 16000,
"preview": "\"\"\"Tests for InfoQuest client and tools.\"\"\"\n\nimport json\nfrom unittest.mock import MagicMock, patch\n\nfrom deerflow.commu"
},
{
"path": "backend/tests/test_lead_agent_model_resolution.py",
"chars": 4704,
"preview": "\"\"\"Tests for lead agent runtime model resolution behavior.\"\"\"\n\nfrom __future__ import annotations\n\nimport pytest\n\nfrom d"
},
{
"path": "backend/tests/test_local_sandbox_encoding.py",
"chars": 1166,
"preview": "import builtins\n\nimport deerflow.sandbox.local.local_sandbox as local_sandbox\nfrom deerflow.sandbox.local.local_sandbox "
},
{
"path": "backend/tests/test_loop_detection_middleware.py",
"chars": 8333,
"preview": "\"\"\"Tests for LoopDetectionMiddleware.\"\"\"\n\nfrom unittest.mock import MagicMock\n\nfrom langchain_core.messages import AIMes"
},
{
"path": "backend/tests/test_mcp_client_config.py",
"chars": 3017,
"preview": "\"\"\"Core behavior tests for MCP client server config building.\"\"\"\n\nimport pytest\n\nfrom deerflow.config.extensions_config "
},
{
"path": "backend/tests/test_mcp_oauth.py",
"chars": 5893,
"preview": "\"\"\"Tests for MCP OAuth support.\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nfrom typing import Any\n\nfrom deer"
},
{
"path": "backend/tests/test_memory_prompt_injection.py",
"chars": 4594,
"preview": "\"\"\"Tests for memory prompt injection formatting.\"\"\"\n\nimport math\n\nfrom deerflow.agents.memory.prompt import _coerce_conf"
},
{
"path": "backend/tests/test_memory_updater.py",
"chars": 10755,
"preview": "from unittest.mock import MagicMock, patch\n\nfrom deerflow.agents.memory.prompt import format_conversation_for_update\nfro"
},
{
"path": "backend/tests/test_memory_upload_filtering.py",
"chars": 9577,
"preview": "\"\"\"Tests for upload-event filtering in the memory pipeline.\n\nCovers two functions introduced to prevent ephemeral file-u"
},
{
"path": "backend/tests/test_model_config.py",
"chars": 847,
"preview": "from deerflow.config.model_config import ModelConfig\n\n\ndef _make_model(**overrides) -> ModelConfig:\n return ModelConf"
},
{
"path": "backend/tests/test_model_factory.py",
"chars": 22434,
"preview": "\"\"\"Tests for deerflow.models.factory.create_chat_model.\"\"\"\n\nfrom __future__ import annotations\n\nimport pytest\nfrom langc"
}
]
// ... and 403 more files (download for full content)
About this extraction
This page contains the full source code of the bytedance/deer-flow GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 603 files (4.4 MB), approximately 1.2M tokens, and a symbol index with 2496 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.