Copy disabled (too large)
Download .txt
Showing preview only (13,562K chars total). Download the full file to get everything.
Repository: langchain-ai/deepagents
Branch: main
Commit: c492a7697e39
Files: 490
Total size: 12.8 MB
Directory structure:
gitextract_1kmmfaan/
├── .github/
│ ├── CODEOWNERS
│ ├── ISSUE_TEMPLATE/
│ │ ├── bug-report.yml
│ │ ├── config.yml
│ │ ├── feature-request.yml
│ │ └── privileged.yml
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── RELEASING.md
│ ├── actions/
│ │ └── uv_setup/
│ │ └── action.yml
│ ├── dependabot.yml
│ ├── scripts/
│ │ ├── aggregate_evals.py
│ │ ├── check_extras_sync.py
│ │ ├── check_version_equality.py
│ │ ├── models.py
│ │ ├── pr-labeler-config.json
│ │ └── pr-labeler.js
│ └── workflows/
│ ├── _benchmark.yml
│ ├── _lint.yml
│ ├── _test.yml
│ ├── auto-label-by-package.yml
│ ├── check_extras_sync.yml
│ ├── check_lockfiles.yml
│ ├── check_sdk_pin.yml
│ ├── check_versions.yml
│ ├── ci.yml
│ ├── deepagents-example.yml
│ ├── evals.yml
│ ├── harbor.yml
│ ├── pr_labeler.yml
│ ├── pr_labeler_backfill.yml
│ ├── pr_lint.yml
│ ├── release-please.yml
│ ├── release.yml
│ ├── require_issue_link.yml
│ ├── sync_priority_labels.yml
│ └── tag-external-issues.yml
├── .gitignore
├── .markdownlint.json
├── .mcp.json
├── .pre-commit-config.yaml
├── .release-please-manifest.json
├── .vscode/
│ ├── extensions.json
│ └── settings.json
├── AGENTS.md
├── LICENSE
├── Makefile
├── README.md
├── action.yml
├── examples/
│ ├── README.md
│ ├── content-builder-agent/
│ │ ├── .gitignore
│ │ ├── AGENTS.md
│ │ ├── README.md
│ │ ├── content_writer.py
│ │ ├── pyproject.toml
│ │ ├── skills/
│ │ │ ├── blog-post/
│ │ │ │ └── SKILL.md
│ │ │ └── social-media/
│ │ │ └── SKILL.md
│ │ └── subagents.yaml
│ ├── deep_research/
│ │ ├── README.md
│ │ ├── agent.py
│ │ ├── langgraph.json
│ │ ├── pyproject.toml
│ │ ├── research_agent/
│ │ │ ├── __init__.py
│ │ │ ├── prompts.py
│ │ │ └── tools.py
│ │ ├── research_agent.ipynb
│ │ └── utils.py
│ ├── downloading_agents/
│ │ └── README.md
│ ├── nvidia_deep_agent/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── langgraph.json
│ │ ├── pyproject.toml
│ │ ├── skills/
│ │ │ ├── cudf-analytics/
│ │ │ │ └── SKILL.md
│ │ │ ├── cuml-machine-learning/
│ │ │ │ └── SKILL.md
│ │ │ ├── data-visualization/
│ │ │ │ └── SKILL.md
│ │ │ └── gpu-document-processing/
│ │ │ └── SKILL.md
│ │ └── src/
│ │ ├── AGENTS.md
│ │ ├── __init__.py
│ │ ├── agent.py
│ │ ├── backend.py
│ │ ├── prompts.py
│ │ └── tools.py
│ ├── ralph_mode/
│ │ ├── README.md
│ │ └── ralph_mode.py
│ └── text-to-sql-agent/
│ ├── .gitignore
│ ├── AGENTS.md
│ ├── README.md
│ ├── agent.py
│ ├── pyproject.toml
│ └── skills/
│ ├── query-writing/
│ │ └── SKILL.md
│ └── schema-exploration/
│ └── SKILL.md
├── libs/
│ ├── README.md
│ ├── acp/
│ │ ├── Makefile
│ │ ├── README.md
│ │ ├── deepagents_acp/
│ │ │ ├── __init__.py
│ │ │ ├── __main__.py
│ │ │ ├── py.typed.py
│ │ │ ├── server.py
│ │ │ └── utils.py
│ │ ├── examples/
│ │ │ ├── __init__.py
│ │ │ ├── demo_agent.py
│ │ │ └── local_context.py
│ │ ├── pyproject.toml
│ │ ├── run_demo_agent.sh
│ │ └── tests/
│ │ ├── __init__.py
│ │ ├── chat_model.py
│ │ ├── test_agent.py
│ │ ├── test_command_allowlist.py
│ │ ├── test_main.py
│ │ └── test_utils.py
│ ├── cli/
│ │ ├── CHANGELOG.md
│ │ ├── Makefile
│ │ ├── README.md
│ │ ├── deepagents_cli/
│ │ │ ├── __init__.py
│ │ │ ├── __main__.py
│ │ │ ├── _ask_user_types.py
│ │ │ ├── _cli_context.py
│ │ │ ├── _debug.py
│ │ │ ├── _server_config.py
│ │ │ ├── _server_constants.py
│ │ │ ├── _session_stats.py
│ │ │ ├── _testing_models.py
│ │ │ ├── _version.py
│ │ │ ├── agent.py
│ │ │ ├── app.py
│ │ │ ├── app.tcss
│ │ │ ├── ask_user.py
│ │ │ ├── built_in_skills/
│ │ │ │ ├── __init__.py
│ │ │ │ └── skill-creator/
│ │ │ │ ├── SKILL.md
│ │ │ │ └── scripts/
│ │ │ │ ├── init_skill.py
│ │ │ │ └── quick_validate.py
│ │ │ ├── clipboard.py
│ │ │ ├── command_registry.py
│ │ │ ├── config.py
│ │ │ ├── configurable_model.py
│ │ │ ├── default_agent_prompt.md
│ │ │ ├── editor.py
│ │ │ ├── file_ops.py
│ │ │ ├── hooks.py
│ │ │ ├── input.py
│ │ │ ├── integrations/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── sandbox_factory.py
│ │ │ │ └── sandbox_provider.py
│ │ │ ├── local_context.py
│ │ │ ├── main.py
│ │ │ ├── mcp_tools.py
│ │ │ ├── mcp_trust.py
│ │ │ ├── media_utils.py
│ │ │ ├── model_config.py
│ │ │ ├── non_interactive.py
│ │ │ ├── offload.py
│ │ │ ├── output.py
│ │ │ ├── project_utils.py
│ │ │ ├── prompts.py
│ │ │ ├── py.typed
│ │ │ ├── remote_client.py
│ │ │ ├── server.py
│ │ │ ├── server_graph.py
│ │ │ ├── server_manager.py
│ │ │ ├── sessions.py
│ │ │ ├── skills/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── commands.py
│ │ │ │ └── load.py
│ │ │ ├── subagents.py
│ │ │ ├── system_prompt.md
│ │ │ ├── textual_adapter.py
│ │ │ ├── tool_display.py
│ │ │ ├── tools.py
│ │ │ ├── ui.py
│ │ │ ├── unicode_security.py
│ │ │ ├── update_check.py
│ │ │ └── widgets/
│ │ │ ├── __init__.py
│ │ │ ├── _links.py
│ │ │ ├── approval.py
│ │ │ ├── ask_user.py
│ │ │ ├── autocomplete.py
│ │ │ ├── chat_input.py
│ │ │ ├── diff.py
│ │ │ ├── history.py
│ │ │ ├── loading.py
│ │ │ ├── mcp_viewer.py
│ │ │ ├── message_store.py
│ │ │ ├── messages.py
│ │ │ ├── model_selector.py
│ │ │ ├── status.py
│ │ │ ├── thread_selector.py
│ │ │ ├── tool_renderers.py
│ │ │ ├── tool_widgets.py
│ │ │ └── welcome.py
│ │ ├── examples/
│ │ │ └── skills/
│ │ │ ├── arxiv-search/
│ │ │ │ ├── SKILL.md
│ │ │ │ └── arxiv_search.py
│ │ │ ├── langgraph-docs/
│ │ │ │ └── SKILL.md
│ │ │ ├── skill-creator/
│ │ │ │ ├── SKILL.md
│ │ │ │ └── scripts/
│ │ │ │ ├── init_skill.py
│ │ │ │ └── quick_validate.py
│ │ │ └── web-research/
│ │ │ └── SKILL.md
│ │ ├── pyproject.toml
│ │ ├── scripts/
│ │ │ ├── check_imports.py
│ │ │ └── install.sh
│ │ └── tests/
│ │ ├── README.md
│ │ ├── integration_tests/
│ │ │ ├── __init__.py
│ │ │ ├── benchmarks/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── test_codspeed_import_benchmarks.py
│ │ │ │ └── test_startup_benchmarks.py
│ │ │ ├── conftest.py
│ │ │ ├── test_acp_mode.py
│ │ │ ├── test_compact_resume.py
│ │ │ ├── test_sandbox_factory.py
│ │ │ └── test_sandbox_operations.py
│ │ └── unit_tests/
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ ├── skills/
│ │ │ ├── __init__.py
│ │ │ ├── test_commands.py
│ │ │ ├── test_load.py
│ │ │ └── test_skills_json.py
│ │ ├── test_agent.py
│ │ ├── test_app.py
│ │ ├── test_approval.py
│ │ ├── test_args.py
│ │ ├── test_ask_user.py
│ │ ├── test_ask_user_middleware.py
│ │ ├── test_autocomplete.py
│ │ ├── test_charset.py
│ │ ├── test_chat_input.py
│ │ ├── test_command_registry.py
│ │ ├── test_compact_tool.py
│ │ ├── test_config.py
│ │ ├── test_configurable_model.py
│ │ ├── test_debug.py
│ │ ├── test_editor.py
│ │ ├── test_end_to_end.py
│ │ ├── test_exception_handling.py
│ │ ├── test_file_ops.py
│ │ ├── test_history.py
│ │ ├── test_hooks.py
│ │ ├── test_imports.py
│ │ ├── test_input_parsing.py
│ │ ├── test_local_context.py
│ │ ├── test_main.py
│ │ ├── test_main_acp_mode.py
│ │ ├── test_main_args.py
│ │ ├── test_mcp_tools.py
│ │ ├── test_mcp_trust.py
│ │ ├── test_mcp_viewer.py
│ │ ├── test_media_utils.py
│ │ ├── test_message_store.py
│ │ ├── test_messages.py
│ │ ├── test_model_config.py
│ │ ├── test_model_selector.py
│ │ ├── test_model_switch.py
│ │ ├── test_non_interactive.py
│ │ ├── test_offload.py
│ │ ├── test_output.py
│ │ ├── test_prompts.py
│ │ ├── test_reload.py
│ │ ├── test_remote_client.py
│ │ ├── test_sandbox_factory.py
│ │ ├── test_server.py
│ │ ├── test_server_config.py
│ │ ├── test_server_graph.py
│ │ ├── test_server_helpers.py
│ │ ├── test_server_manager.py
│ │ ├── test_sessions.py
│ │ ├── test_shell_allow_list.py
│ │ ├── test_status.py
│ │ ├── test_subagents.py
│ │ ├── test_textual_adapter.py
│ │ ├── test_thread_selector.py
│ │ ├── test_token_tracker.py
│ │ ├── test_ui.py
│ │ ├── test_unicode_security.py
│ │ ├── test_update_check.py
│ │ ├── test_version.py
│ │ ├── test_welcome.py
│ │ └── tools/
│ │ ├── __init__.py
│ │ └── test_fetch_url.py
│ ├── deepagents/
│ │ ├── Makefile
│ │ ├── README.md
│ │ ├── deepagents/
│ │ │ ├── __init__.py
│ │ │ ├── _models.py
│ │ │ ├── _version.py
│ │ │ ├── backends/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── composite.py
│ │ │ │ ├── filesystem.py
│ │ │ │ ├── langsmith.py
│ │ │ │ ├── local_shell.py
│ │ │ │ ├── protocol.py
│ │ │ │ ├── sandbox.py
│ │ │ │ ├── state.py
│ │ │ │ ├── store.py
│ │ │ │ └── utils.py
│ │ │ ├── base_prompt.md
│ │ │ ├── graph.py
│ │ │ ├── middleware/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _utils.py
│ │ │ │ ├── async_subagents.py
│ │ │ │ ├── filesystem.py
│ │ │ │ ├── memory.py
│ │ │ │ ├── patch_tool_calls.py
│ │ │ │ ├── skills.py
│ │ │ │ ├── subagents.py
│ │ │ │ └── summarization.py
│ │ │ └── py.typed
│ │ ├── pyproject.toml
│ │ ├── scripts/
│ │ │ └── check_imports.py
│ │ └── tests/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── integration_tests/
│ │ │ ├── __init__.py
│ │ │ ├── test_deepagents.py
│ │ │ ├── test_filesystem_middleware.py
│ │ │ ├── test_langsmith_sandbox.py
│ │ │ └── test_subagent_middleware.py
│ │ ├── unit_tests/
│ │ │ ├── __init__.py
│ │ │ ├── backends/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── test_backwards_compat.py
│ │ │ │ ├── test_composite_backend.py
│ │ │ │ ├── test_composite_backend_async.py
│ │ │ │ ├── test_file_format.py
│ │ │ │ ├── test_filesystem_backend.py
│ │ │ │ ├── test_filesystem_backend_async.py
│ │ │ │ ├── test_langsmith_sandbox.py
│ │ │ │ ├── test_local_shell_backend.py
│ │ │ │ ├── test_protocol.py
│ │ │ │ ├── test_sandbox_backend.py
│ │ │ │ ├── test_state_backend.py
│ │ │ │ ├── test_state_backend_async.py
│ │ │ │ ├── test_store_backend.py
│ │ │ │ ├── test_store_backend_async.py
│ │ │ │ ├── test_timeout_compat.py
│ │ │ │ └── test_utils.py
│ │ │ ├── chat_model.py
│ │ │ ├── middleware/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── test_compact_tool.py
│ │ │ │ ├── test_filesystem_middleware_init.py
│ │ │ │ ├── test_memory_middleware.py
│ │ │ │ ├── test_memory_middleware_async.py
│ │ │ │ ├── test_skills_middleware.py
│ │ │ │ ├── test_skills_middleware_async.py
│ │ │ │ ├── test_subagent_middleware_init.py
│ │ │ │ ├── test_summarization_factory.py
│ │ │ │ ├── test_summarization_middleware.py
│ │ │ │ └── test_tool_schemas.py
│ │ │ ├── smoke_tests/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── conftest.py
│ │ │ │ ├── snapshots/
│ │ │ │ │ ├── custom_system_message.md
│ │ │ │ │ ├── system_prompt_with_execute.md
│ │ │ │ │ ├── system_prompt_with_memory_and_skills.md
│ │ │ │ │ ├── system_prompt_with_sync_and_async_subagents.md
│ │ │ │ │ └── system_prompt_without_execute.md
│ │ │ │ └── test_system_prompt.py
│ │ │ ├── test_async_subagents.py
│ │ │ ├── test_benchmark_create_deep_agent.py
│ │ │ ├── test_end_to_end.py
│ │ │ ├── test_file_system_tools.py
│ │ │ ├── test_file_system_tools_async.py
│ │ │ ├── test_local_sandbox_operations.py
│ │ │ ├── test_local_shell.py
│ │ │ ├── test_middleware.py
│ │ │ ├── test_middleware_async.py
│ │ │ ├── test_models.py
│ │ │ ├── test_subagents.py
│ │ │ ├── test_timing.py
│ │ │ ├── test_todo_middleware.py
│ │ │ └── test_version.py
│ │ └── utils.py
│ ├── evals/
│ │ ├── Makefile
│ │ ├── README.md
│ │ ├── deepagents_evals/
│ │ │ ├── __init__.py
│ │ │ ├── categories.json
│ │ │ └── radar.py
│ │ ├── deepagents_harbor/
│ │ │ ├── __init__.py
│ │ │ ├── backend.py
│ │ │ ├── deepagents_wrapper.py
│ │ │ ├── failure.py
│ │ │ ├── langsmith.py
│ │ │ ├── metadata.py
│ │ │ └── stats.py
│ │ ├── pyproject.toml
│ │ ├── scripts/
│ │ │ ├── analyze.py
│ │ │ ├── generate_radar.py
│ │ │ └── harbor_langsmith.py
│ │ └── tests/
│ │ ├── __init__.py
│ │ ├── evals/
│ │ │ ├── README.md
│ │ │ ├── __init__.py
│ │ │ ├── conftest.py
│ │ │ ├── data/
│ │ │ │ ├── benchmark_samples/
│ │ │ │ │ ├── bfcl_v3_final.json
│ │ │ │ │ ├── frames_final.json
│ │ │ │ │ └── nexus_final.json
│ │ │ │ └── bfcl_apis/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── long_context.py
│ │ │ │ ├── message_api.py
│ │ │ │ ├── ticket_api.py
│ │ │ │ ├── trading_bot.py
│ │ │ │ ├── travel_booking.py
│ │ │ │ └── vehicle_control.py
│ │ │ ├── external_benchmarks.py
│ │ │ ├── fixtures/
│ │ │ │ └── summarization_seed_messages.json
│ │ │ ├── llm_judge.py
│ │ │ ├── memory_agent_bench/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── configs.py
│ │ │ │ ├── data_utils.py
│ │ │ │ ├── eval_utils.py
│ │ │ │ └── test_memory_agent_bench.py
│ │ │ ├── pytest_reporter.py
│ │ │ ├── tau2_airline/
│ │ │ │ ├── LICENSE
│ │ │ │ ├── __init__.py
│ │ │ │ ├── data/
│ │ │ │ │ ├── db.json
│ │ │ │ │ ├── policy.md
│ │ │ │ │ └── tasks.json
│ │ │ │ ├── domain.py
│ │ │ │ ├── evaluation.py
│ │ │ │ ├── runner.py
│ │ │ │ ├── test_tau2_airline.py
│ │ │ │ └── user_sim.py
│ │ │ ├── test__reporter_sample.py
│ │ │ ├── test_external_benchmarks.py
│ │ │ ├── test_file_operations.py
│ │ │ ├── test_followup_quality.py
│ │ │ ├── test_hitl.py
│ │ │ ├── test_memory.py
│ │ │ ├── test_memory_multiturn.py
│ │ │ ├── test_skills.py
│ │ │ ├── test_subagents.py
│ │ │ ├── test_summarization.py
│ │ │ ├── test_system_prompt.py
│ │ │ ├── test_tool_selection.py
│ │ │ ├── test_tool_usage_relational.py
│ │ │ └── utils.py
│ │ └── unit_tests/
│ │ ├── __init__.py
│ │ ├── test_category_tagging.py
│ │ ├── test_external_benchmark_helpers.py
│ │ ├── test_imports.py
│ │ ├── test_infra.py
│ │ └── test_radar.py
│ └── partners/
│ ├── daytona/
│ │ ├── LICENSE
│ │ ├── Makefile
│ │ ├── README.md
│ │ ├── langchain_daytona/
│ │ │ ├── __init__.py
│ │ │ └── sandbox.py
│ │ ├── pyproject.toml
│ │ └── tests/
│ │ ├── __init__.py
│ │ ├── integration_tests/
│ │ │ ├── __init__.py
│ │ │ └── test_integration.py
│ │ ├── test_import.py
│ │ └── unit_tests/
│ │ ├── __init__.py
│ │ └── test_import.py
│ ├── modal/
│ │ ├── LICENSE
│ │ ├── Makefile
│ │ ├── README.md
│ │ ├── langchain_modal/
│ │ │ ├── __init__.py
│ │ │ └── sandbox.py
│ │ ├── pyproject.toml
│ │ └── tests/
│ │ ├── __init__.py
│ │ ├── integration_tests/
│ │ │ ├── __init__.py
│ │ │ └── test_integration.py
│ │ ├── test_import.py
│ │ └── unit_tests/
│ │ ├── __init__.py
│ │ └── test_import.py
│ ├── quickjs/
│ │ ├── LICENSE
│ │ ├── Makefile
│ │ ├── README.md
│ │ ├── langchain_quickjs/
│ │ │ ├── __init__.py
│ │ │ ├── _foreign_function_docs.py
│ │ │ ├── _foreign_functions.py
│ │ │ └── middleware.py
│ │ ├── pyproject.toml
│ │ └── tests/
│ │ ├── __init__.py
│ │ └── unit_tests/
│ │ ├── __init__.py
│ │ ├── chat_model.py
│ │ ├── smoke_tests/
│ │ │ ├── __init__.py
│ │ │ ├── conftest.py
│ │ │ ├── snapshots/
│ │ │ │ ├── quickjs_system_prompt_mixed_foreign_functions.md
│ │ │ │ └── quickjs_system_prompt_no_tools.md
│ │ │ └── test_system_prompt.py
│ │ ├── test_end_to_end.py
│ │ ├── test_end_to_end_async.py
│ │ ├── test_foreign_function_docs.py
│ │ ├── test_import.py
│ │ └── test_system_prompt.py
│ └── runloop/
│ ├── LICENSE
│ ├── Makefile
│ ├── README.md
│ ├── langchain_runloop/
│ │ ├── __init__.py
│ │ └── sandbox.py
│ ├── pyproject.toml
│ └── tests/
│ ├── __init__.py
│ ├── integration_tests/
│ │ ├── __init__.py
│ │ └── test_integration.py
│ ├── test_import.py
│ └── unit_tests/
│ ├── __init__.py
│ └── test_import.py
└── release-please-config.json
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/CODEOWNERS
================================================
# This file defines code ownership for the Deep Agents repository.
# Each line is a file pattern followed by one or more owners.
# Owners will be automatically requested for review when someone opens a pull request.
# For more information: https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
/libs/cli/ @mdrxy
================================================
FILE: .github/ISSUE_TEMPLATE/bug-report.yml
================================================
name: "\U0001F41B Bug Report"
description: Report a bug in Deep Agents. To report a security issue, please instead use the security option below. For questions, please use the Deep Agents forum (below).
labels: ["bug"]
type: bug
body:
- type: markdown
attributes:
value: |
> **All contributions must be in English.** See the [language policy](https://docs.langchain.com/oss/python/contributing/overview#language-policy).
Thank you for taking the time to file a bug report.
For usage questions and general design questions, please use the [Deep Agents Forum](https://forum.langchain.com/c/oss-product-help-lc-and-lg/deep-agents/18).
Check these before submitting to see if your issue has already been reported or fixed, or if there's another way to solve your problem:
* [Documentation](https://docs.langchain.com/oss/python/deepagents/overview),
* [API Reference Documentation](https://reference.langchain.com/python/deepagents/),
* [LangChain ChatBot](https://chat.langchain.com/)
* [GitHub search](https://github.com/langchain-ai/deepagents),
* [Deep Agents Forum](https://forum.langchain.com/c/oss-product-help-lc-and-lg/deep-agents/18),
**Note:** For bug fixes, please feel free to open a PR contributing a failing test. However, please do not begin to work on a fix unless explicitly assigned to this issue by a maintainer.
- type: checkboxes
id: checks
attributes:
label: Checked other resources
description: Please confirm the following.
options:
- label: This is a bug, not a usage question.
required: true
- label: I added a clear and descriptive title.
required: true
- label: I searched existing issues and didn't find this.
required: true
- label: I can reproduce this with the latest released version.
required: true
- label: I included a minimal reproducible example and steps to reproduce.
required: true
- type: checkboxes
id: package
attributes:
label: Area (Required)
description: Which area of the repository does this issue relate to? Select at least one.
options:
- label: deepagents (SDK)
- label: cli
- type: textarea
id: related
validations:
required: false
attributes:
label: Related Issues / PRs
description: |
If this bug is related to any existing issues or pull requests, please link them here.
placeholder: |
* e.g. #123, #456
- type: textarea
id: reproduction
validations:
required: true
attributes:
label: Reproduction Steps / Example Code (Python)
description: |
Please add a self-contained, [minimal, reproducible, example](https://stackoverflow.com/help/minimal-reproducible-example) with your use case.
If a maintainer can copy it, run it, and see it right away, there's a much higher chance that you'll be able to get help.
**Important!**
* Avoid screenshots, as they are hard to read and (more importantly) don't allow others to copy-and-paste your code.
* Reduce your code to the minimum required to reproduce the issue if possible.
(This will be automatically formatted into code, so no need for backticks.)
render: python
- type: textarea
id: error
attributes:
label: Error Message and Stack Trace (if applicable)
description: |
If you are reporting an error, please copy and paste the full error message and
stack trace.
(This will be automatically formatted into code, so no need for backticks.)
render: shell
- type: textarea
id: description
attributes:
label: Description
description: |
What is the problem, question, or error?
Write a short description telling what you are doing, what you expect to happen, and what is currently happening.
placeholder: |
* I'm trying to use the `deepagents` library to do X.
* I expect to see Y.
* Instead, it does Z.
validations:
required: true
- type: textarea
id: system-info
attributes:
label: Environment / System Info
description: Provide OS, Python version, `deepagents` and `langchain` versions, and any relevant env vars.
placeholder: |
OS:
Python: 3.x.x
deepagents: 0.x.y
deepagents-cli: 0.x.y
- type: markdown
attributes:
value: |
Thanks for helping improve Deep Agents.
================================================
FILE: .github/ISSUE_TEMPLATE/config.yml
================================================
blank_issues_enabled: false
version: 2.1
contact_links:
- name: 💬 Deep Agents Forum
url: https://forum.langchain.com/c/oss-product-help-lc-and-lg/deep-agents/18
about: General community discussions and support
- name: 📚 Deep Agents Documentation
url: https://docs.langchain.com/oss/python/deepagents/overview
about: View the official Deep Agents documentation
- name: 📚 API Reference Documentation
url: https://reference.langchain.com/python/deepagents/
about: View the official Deep Agents API reference documentation
- name: 📚 Documentation issue
url: https://github.com/langchain-ai/docs/issues/new?template=05-deepagents.yml
about: Report an issue related to the Deep Agents documentation
================================================
FILE: .github/ISSUE_TEMPLATE/feature-request.yml
================================================
name: "✨ Feature Request"
description: Request a new feature or enhancement for Deep Agents. For questions, please use the Deep Agents forum (below).
labels: ["feature request"]
type: feature
body:
- type: markdown
attributes:
value: |
> **All contributions must be in English.** See the [language policy](https://docs.langchain.com/oss/python/contributing/overview#language-policy).
Thank you for taking the time to request a new feature.
Use this to request NEW FEATURES or ENHANCEMENTS in Deep Agents. For bug reports, please use the bug report template. For usage questions and general design questions, please use the [Deep Agents Forum](https://forum.langchain.com/c/oss-product-help-lc-and-lg/deep-agents/18).
Relevant links to check before filing a feature request to see if your request has already been made or
if there's another way to achieve what you want:
* [Documentation](https://docs.langchain.com/oss/python/deepagents/overview),
* [API Reference Documentation](https://reference.langchain.com/python/deepagents/),
* [LangChain ChatBot](https://chat.langchain.com/)
* [GitHub search](https://github.com/langchain-ai/deepagents),
* [Deep Agents Forum](https://forum.langchain.com/c/oss-product-help-lc-and-lg/deep-agents/18),
**Note:** Do not begin work on a PR unless explicitly assigned to this issue by a maintainer.
- type: checkboxes
id: checks
attributes:
label: Checked other resources
description: Please confirm the following.
options:
- label: This is a feature request, not a bug report.
required: true
- label: I searched existing issues and didn't find this feature.
required: true
- label: I checked the docs and README for existing functionality.
required: true
- label: This request applies to this repo (deepagents) and not an external package.
required: true
- type: checkboxes
id: package
attributes:
label: Area (Required)
description: Which area of the repository does this request relate to? Select at least one.
options:
- label: deepagents (SDK)
- label: cli
- type: textarea
id: feature-description
validations:
required: true
attributes:
label: Feature description
description: What would you like to see added to Deep Agents? Be specific.
- type: textarea
id: proposed-solution
attributes:
label: Proposed solution (optional)
description: If you have an idea how to implement this, describe it here. Include API examples if relevant.
- type: textarea
id: additional-context
attributes:
label: Additional context (optional)
description: Links, examples, or related issues
- type: markdown
attributes:
value: |
Thanks for helping improve Deep Agents.
================================================
FILE: .github/ISSUE_TEMPLATE/privileged.yml
================================================
name: "\U0001F512 Privileged"
description: You are a Deep Agents maintainer. If not, check the other options.
body:
- type: markdown
attributes:
value: |
> **All contributions must be in English.** See the [language policy](https://docs.langchain.com/oss/python/contributing/overview#language-policy).
If you are not a Deep Agents maintainer, employee, or were not asked directly by a maintainer to create an issue, then please start the conversation on the [Deep Agents Forum](https://forum.langchain.com/c/oss-product-help-lc-and-lg/deep-agents/18) instead.
**Note:** Do not begin work on a PR unless explicitly assigned to this issue by a maintainer.
- type: checkboxes
id: privileged
attributes:
label: Privileged issue
description: Confirm that you are allowed to create an issue here.
options:
- label: I am a Deep Agents maintainer.
required: true
- type: textarea
id: content
attributes:
label: Issue Content
description: Add the content of the issue here.
- type: checkboxes
id: package
attributes:
label: Area (Required)
description: |
Please select area(s) that this issue is related to.
options:
- label: deepagents (SDK)
- label: cli
- label: Other / not sure / general
- type: markdown
attributes:
value: |
Please do not begin work on a PR unless explicitly assigned to this issue by a maintainer.
================================================
FILE: .github/PULL_REQUEST_TEMPLATE.md
================================================
Fixes #
<!-- Replace everything above this line with a 1-2 sentence description of your change. Keep the "Fixes #xx" keyword and update the issue number. -->
Read the full contributing guidelines: https://docs.langchain.com/oss/python/contributing/overview
> **All contributions must be in English.** See the [language policy](https://docs.langchain.com/oss/python/contributing/overview#language-policy).
If you paste a large, clearly AI-generated description here, your PR may be IGNORED or CLOSED!
Thank you for contributing to Deep Agents! Follow these steps to have your pull request considered as ready for review.
1. PR title: Should follow the format: TYPE(SCOPE): DESCRIPTION
- Examples:
- fix(sdk): resolve flag parsing error
- feat(cli): add multi-tenant support
- test(acp): update API usage tests
- Allowed TYPE and SCOPE values: https://github.com/langchain-ai/deepagents/blob/main/.github/workflows/pr_lint.yml#L15-L26
2. PR description:
- Write 1-2 sentences summarizing the change.
- If this PR addresses a specific issue, please include "Fixes #ISSUE_NUMBER" in the description to automatically close the issue when the PR is merged.
- If there are any breaking changes, please clearly describe them.
- If this PR depends on another PR being merged first, please include "Depends on #PR_NUMBER" in the description.
3. Run `make format`, `make lint` and `make test` from the root of the package(s) you've modified.
- We will not consider a PR unless these three are passing in CI.
4. How did you verify your code works?
Additional guidelines:
- We ask that if you use generative AI for your contribution, you include a disclaimer.
- PRs should not touch more than one package unless absolutely necessary.
- Do not update the `uv.lock` files or add dependencies to `pyproject.toml` files (even optional ones) unless you have explicit permission to do so by a maintainer.
## Social handles (optional)
<!-- If you'd like a shoutout on release, add your socials below -->
Twitter: @
LinkedIn: https://linkedin.com/in/
================================================
FILE: .github/RELEASING.md
================================================
# CLI Release Process
This document describes the release process for the CLI package (`libs/cli`) in the Deep Agents monorepo using [release-please](https://github.com/googleapis/release-please).
## Overview
CLI releases are managed via release-please, which:
1. Analyzes conventional commits on the `main` branch
2. Creates/updates a release PR with changelog and version bump
3. When merged, creates a GitHub release and publishes to PyPI
## How It Works
### Automatic Release PRs
When commits land on `main`, release-please analyzes them and either:
- **Creates a new release PR** if releasable changes exist
- **Updates an existing release PR** with additional changes
- **Does nothing** if no releasable commits are found (e.g. commits with type `chore`, `refactor`, etc.)
Release PRs are created on branches named `release-please--branches--main--components--<package>`.
### Triggering a Release
To release the CLI:
1. Merge conventional commits to `main` (see [Commit Format](#commit-format))
2. Wait for release-please to create/update the release PR
3. Review the generated changelog in the PR
4. **Verify the SDK pin** — check that `deepagents==` in `libs/cli/pyproject.toml` is up to date. If the latest SDK version has been confirmed compatible, you should bump the pin on `main` and let release-please regenerate the PR before merging. See [Release Failed: CLI SDK Pin Mismatch](#release-failed-cli-sdk-pin-mismatch) for recovery if this is missed.
5. Merge the release PR — this triggers the build, pre-release checks, PyPI publish, and GitHub release
> [!IMPORTANT]
> When developing CLI features that depend on new SDK functionality, bump the SDK pin as part of that work — don't defer it to release time. The pin should always reflect the minimum SDK version the CLI actually requires!
### Version Bumping
Version bumps are determined by commit types:
| Commit Type | Version Bump | Example |
| ------------------------------ | ------------- | ---------------------------------------- |
| `fix:` | Patch (0.0.x) | `fix(cli): resolve config loading issue` |
| `feat:` | Minor (0.x.0) | `feat(cli): add new export command` |
| `feat!:` or `BREAKING CHANGE:` | Major (x.0.0) | `feat(cli)!: redesign config format` |
> [!NOTE]
> While version is < 1.0.0, `bump-minor-pre-major` and `bump-patch-for-minor-pre-major` are enabled, so breaking changes bump minor and features bump patch.
## Commit Format
All commits must follow [Conventional Commits](https://www.conventionalcommits.org/) format with types and scopes defined in `.github/workflows/pr_lint.yml`:
```text
<type>(<scope>): <description>
[optional body]
[optional footer(s)]
```
### Examples
```bash
# Patch release
fix(cli): resolve type hinting issue
# Minor release
feat(cli): add new chat completion feature
# Major release (breaking change)
feat(cli)!: redesign configuration format
BREAKING CHANGE: Config files now use TOML instead of JSON.
```
## Configuration Files
### `release-please-config.json`
Defines release-please behavior for each package.
### `.release-please-manifest.json`
Tracks the current version of each package:
```json
{
"libs/cli": "0.0.17"
}
```
This file is automatically updated by release-please when releases are created.
## Release Workflow
### Detection Mechanism
The release-please workflow (`.github/workflows/release-please.yml`) detects a CLI release by checking if `libs/cli/CHANGELOG.md` was modified in the commit. This file is always updated by release-please when merging a release PR.
### Lockfile Updates
When release-please creates or updates a release PR, the `update-lockfiles` job automatically regenerates `uv.lock` files since release-please updates `pyproject.toml` versions but doesn't regenerate lockfiles. An up-to-date lockfile is necessary for the CLI since it depends on the SDK, and `libs/evals` depends on the CLI.
### Release Pipeline
The release workflow (`.github/workflows/release.yml`) runs when a release PR is merged:
1. **Build** - Creates distribution package
2. **Collect Contributors** - Gathers PR authors for release notes, including social media handles. Excludes members of `langchain-ai`.
3. **Release Notes** - Extracts changelog or generates from git log
4. **Test PyPI** - Publishes to test.pypi.org for validation
5. **Pre-release Checks** - Runs tests against the built package
6. **Publish** - Publishes to PyPI
7. **Mark Release** - Creates a published GitHub release with the built artifacts
### Release PR Labels
Release-please uses labels to track the state of release PRs:
| Label | Meaning |
| ----- | ------- |
| `autorelease: pending` | Release PR has been merged but not yet tagged/released |
| `autorelease: tagged` | Release PR has been successfully tagged and released |
Because `skip-github-release: true` is set in the release-please config (we create releases via our own workflow instead of release-please), our `release.yml` workflow must update these labels manually. After successfully creating the GitHub release and tag, the `mark-release` job transitions the label from `pending` to `tagged`.
This label transition signals to release-please that the merged PR has been fully processed, allowing it to create new release PRs for subsequent commits.
## Manual Release
For hotfixes or exceptional cases, you can trigger a release manually. Use the `hotfix` commit type so as not to trigger a further PR update/version bump.
1. Go to **Actions** > **Package Release**
2. Click **Run workflow**
3. Select the package to release (`deepagents-cli` only for exception/recovery/hotfix scenarios; otherwise use release-please)
4. (Optionally enable `dangerous-nonmain-release` for hotfix branches)
> [!WARNING]
> Manual releases should be rare. Prefer the standard release-please flow for the CLI. Manual dispatch bypasses the changelog detection in `release-please.yml` and skips the lockfile update job. Only use it for recovery scenarios (e.g., the release workflow failed after the release PR was already merged).
## Troubleshooting
### "Found release tag with component X, but not configured in manifest" Warnings
You may see warnings in the release-please logs like:
```txt
⚠ Found release tag with component 'deepagents=', but not configured in manifest
```
This is **harmless**. Release-please scans existing tags in the repository and warns when it finds tags for packages that aren't in the current configuration. The `deepagents` SDK package has existing release tags (`deepagents==0.x.x`) but is not currently managed by release-please.
These warnings will disappear once the SDK is added to `release-please-config.json`. Until then, they can be safely ignored—they don't affect CLI releases.
### Unexpected Commit Authors in Release PRs
When viewing a release-please PR on GitHub, you may see commits attributed to contributors who didn't directly push to that PR. For example:
```txt
johndoe and others added 3 commits 4 minutes ago
```
This is a **GitHub UI quirk** caused by force pushes/rebasing, not actual commits to the PR branch.
**What's happening:**
1. release-please rebases its branch onto the latest `main`
2. The PR branch now includes commits from `main` as parent commits
3. GitHub's UI shows all "new" commits that appeared after the force push, including rebased parents
**The actual PR commits** are only:
- The release commit (e.g., `release(deepagents-cli): 0.0.18`)
- The lockfile update commit (e.g., `chore: update lockfiles`)
Other commits shown are just the base that the PR branch was rebased onto. This is normal behavior and doesn't indicate unauthorized access.
### Release PR Stuck with "autorelease: pending" Label
If a release PR shows `autorelease: pending` after the release workflow completed, the label update step may have failed. This can block release-please from creating new release PRs.
**To fix manually:**
```bash
# Find the PR number for the release commit
gh pr list --state merged --search "release(deepagents-cli)" --limit 5
# Update the label
gh pr edit <PR_NUMBER> --remove-label "autorelease: pending" --add-label "autorelease: tagged"
```
The label update is non-fatal in the workflow (`|| true`), so the release itself succeeded—only the label needs fixing.
### Yanking a Release
If you need to yank (retract) a release:
#### 1. Yank from PyPI
Using the PyPI web interface or a CLI tool.
#### 2. Delete GitHub Release/Tag (optional)
```bash
# Delete the GitHub release
gh release delete "deepagents-cli==<VERSION>" --yes
# Delete the git tag
git tag -d "deepagents-cli==<VERSION>"
git push origin --delete "deepagents-cli==<VERSION>"
```
#### 3. Fix the Manifest
Edit `.release-please-manifest.json` to the last good version:
```json
{
"libs/cli": "0.0.15"
}
```
Also update `libs/cli/pyproject.toml` and `_version.py` to match.
### Release Failed: CLI SDK Pin Mismatch
If the release workflow fails at the "Verify CLI pins latest SDK version" step with:
```txt
CLI SDK pin does not match SDK version!
SDK version (libs/deepagents/pyproject.toml): 0.4.2
CLI SDK pin (libs/cli/pyproject.toml): 0.4.1
```
This means the CLI's pinned `deepagents` dependency in `libs/cli/pyproject.toml` doesn't match the current SDK version. This can happen when the SDK is released independently and the CLI's pin isn't updated before the CLI release PR is merged.
**To fix:**
1. **Hotfix the pin on `main`:**
```bash
# Update the pin in libs/cli/pyproject.toml
# e.g., change deepagents==0.4.1 to deepagents==0.4.2
cd libs/cli && uv lock
git add libs/cli/pyproject.toml libs/cli/uv.lock
git commit -m "hotfix(cli): bump SDK pin to <VERSION>"
git push origin main
```
2. **Manually trigger the release** (the push to `main` won't re-trigger the release because the commit doesn't modify `libs/cli/CHANGELOG.md`):
- Go to **Actions** > **Package Release**
- Click **Run workflow**
- Select `main` branch and `deepagents-cli` package
3. **Verify the `autorelease: pending` label was swapped.** The `mark-release` job will attempt to find the release PR by label and update it automatically, even on manual dispatch. If the label wasn't swapped (e.g., the job failed), fix it manually — see [Release PR Stuck with "autorelease: pending" Label](#release-pr-stuck-with-autorelease-pending-label). **If you skip this step, release-please will not create new release PRs.**
### Re-releasing a Version
PyPI does not allow re-uploading the same version. If a release failed partway:
1. If already on PyPI: bump the version and release again
2. If only on test PyPI: the workflow uses `skip-existing: true`, so re-running should work
3. If the GitHub release exists but PyPI publish failed (e.g., from a manual re-run): delete the release/tag and re-run the workflow
### "Untagged, merged release PRs outstanding" Error
If release-please logs show:
```txt
⚠ There are untagged, merged release PRs outstanding - aborting
```
This means a release PR was merged but its merge commit doesn't have the expected tag. This can happen if:
- The release workflow failed and the tag was manually created on a different commit (e.g., a hotfix)
- Someone manually moved or recreated a tag
**To diagnose**, compare the tag's commit with the release PR's merge commit:
```bash
# Find what commit the tag points to
git ls-remote --tags origin | grep "deepagents-cli==<VERSION>"
# Find the release PR's merge commit
gh pr view <PR_NUMBER> --json mergeCommit --jq '.mergeCommit.oid'
```
If these differ, release-please is confused.
**To fix**, move the tag and update the GitHub release:
```bash
# 1. Delete the remote tag
git push origin :refs/tags/deepagents-cli==<VERSION>
# 2. Delete local tag if it exists
git tag -d deepagents-cli==<VERSION> 2>/dev/null || true
# 3. Create tag on the correct commit (the release PR's merge commit)
git tag deepagents-cli==<VERSION> <MERGE_COMMIT_SHA>
# 4. Push the new tag
git push origin deepagents-cli==<VERSION>
# 5. Update the GitHub release's target_commitish to match
# (moving a tag doesn't update this field automatically)
gh api -X PATCH repos/langchain-ai/deepagents/releases/$(gh api repos/langchain-ai/deepagents/releases --jq '.[] | select(.tag_name == "deepagents-cli==<VERSION>") | .id') \
-f target_commitish=<MERGE_COMMIT_SHA>
```
After fixing, the next push to main should properly create new release PRs.
> [!NOTE]
> If the package was already published to PyPI and you need to re-run the workflow, it uses `skip-existing: true` on test PyPI, so it will succeed without re-uploading.
## References
- [release-please documentation](https://github.com/googleapis/release-please)
- [Conventional Commits](https://www.conventionalcommits.org/)
- [PyPI Trusted Publishing](https://docs.pypi.org/trusted-publishers/)
================================================
FILE: .github/actions/uv_setup/action.yml
================================================
# Helper to set up Python and uv with caching
name: uv-install
description: Set up Python and uv with caching
inputs:
python-version:
description: Python version, supporting MAJOR.MINOR only
required: true
enable-cache:
description: Enable caching for uv dependencies
required: false
default: "true"
cache-suffix:
description: Custom cache key suffix for cache invalidation
required: false
default: ""
working-directory:
description: Working directory for cache glob scoping
required: false
default: "**"
env:
UV_VERSION: "0.5.25"
runs:
using: composite
steps:
- name: Install uv and set the python version
uses: astral-sh/setup-uv@v7
with:
version: ${{ env.UV_VERSION }}
python-version: ${{ inputs.python-version }}
enable-cache: ${{ inputs.enable-cache }}
cache-dependency-glob: |
${{ inputs.working-directory }}/pyproject.toml
${{ inputs.working-directory }}/uv.lock
${{ inputs.working-directory }}/requirements*.txt
cache-suffix: ${{ inputs.cache-suffix }}
================================================
FILE: .github/dependabot.yml
================================================
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
day: "monday"
groups:
github-actions:
patterns: ["*"]
- package-ecosystem: "uv"
directories:
- "/libs/deepagents"
- "/libs/cli"
- "/libs/evals"
- "/libs/acp"
- "/libs/partners/daytona"
- "/examples/content-builder-agent"
- "/examples/deep_research"
- "/examples/text-to-sql-agent"
schedule:
interval: "weekly"
day: "monday"
groups:
pip-dependencies:
patterns: ["*"]
================================================
FILE: .github/scripts/aggregate_evals.py
================================================
from __future__ import annotations
import glob
import json
import os
import sys
from pathlib import Path
from tabulate import tabulate
def _format_table(rows: list[dict[str, object]], headers: list[str]) -> list[list[object]]:
"""Build tabulate-ready rows from report dicts."""
return [
[
str(r.get("model", "")),
r.get("passed", 0),
r.get("failed", 0),
r.get("skipped", 0),
r.get("total", 0),
r.get("correctness", 0.0),
r.get("solve_rate") or "n/a",
r.get("step_ratio") or "n/a",
r.get("tool_call_ratio") or "n/a",
r.get("median_duration_s", 0.0),
]
for r in rows
]
# Column alignment for the tabulate "github" tables: model name left-aligned,
# all metric columns right-aligned. Order must match _HEADERS.
_COLALIGN = ("left", "right", "right", "right", "right", "right", "right", "right", "right", "right")
# Column headers; order must match the row layout built by _format_table.
_HEADERS = [
    "model",
    "passed",
    "failed",
    "skipped",
    "total",
    "correctness",
    "solve_rate",
    "step_ratio",
    "tool_call_ratio",
    "median_duration_s",
]
# Repo-relative path to the category-label mapping used for table headers.
_CATEGORIES_JSON = Path(__file__).resolve().parents[2] / "libs" / "evals" / "deepagents_evals" / "categories.json"
def _load_category_labels() -> dict[str, str]:
    """Load human-readable category labels from `categories.json`.

    Returns:
        Mapping of category name to display label, or empty dict on failure.
    """
    try:
        payload = json.loads(_CATEGORIES_JSON.read_text(encoding="utf-8"))
        return payload["labels"]
    except (FileNotFoundError, json.JSONDecodeError, KeyError) as exc:
        # Best-effort: a missing/malformed file degrades to raw category names.
        print(f"warning: could not load category labels from {_CATEGORIES_JSON}: {exc}", file=sys.stderr)
        return {}
def _build_category_table(rows: list[dict[str, object]]) -> list[str]:
    """Build a per-category scores table from report rows.

    Args:
        rows: Report row dicts, each expected to contain a `category_scores`
            mapping and a `model` string.

    Returns:
        A single-element list containing the rendered Markdown table string,
        or an empty list when no category data is present.
    """
    # Union of category names across all models, keeping first-seen order.
    seen: dict[str, None] = {}
    for report in rows:
        for cat in (report.get("category_scores") or {}):
            seen.setdefault(cat, None)
    all_cats = list(seen)
    if not all_cats:
        return []

    labels = _load_category_labels()
    headers = ["model", *(labels.get(c, c) for c in all_cats)]

    body: list[list[object]] = []
    for report in rows:
        scores = report.get("category_scores") or {}
        # Em dash marks a category this model has no score for.
        body.append([
            str(report.get("model", "")),
            *(scores.get(c, "—") for c in all_cats),
        ])

    alignment = ("left", *("right" for _ in all_cats))
    return [tabulate(body, headers=headers, tablefmt="github", colalign=alignment)]
def main() -> None:
    """Generate an aggregated report.

    Reads per-model `evals_report.json` artifacts, writes `evals_summary.json`
    for offline analysis, and renders Markdown tables to stdout and (when set)
    the GitHub step summary file.
    """
    rows: list[dict[str, object]] = [
        json.loads(Path(file).read_text(encoding="utf-8"))
        for file in sorted(glob.glob("evals_artifacts/**/evals_report.json", recursive=True))
    ]

    # --- JSON artifact for offline analysis ---
    Path("evals_summary.json").write_text(
        json.dumps(rows, indent=2, sort_keys=True) + "\n", encoding="utf-8"
    )

    lines: list[str] = ["## Evals summary", ""]

    # --- Table 1: grouped by provider, then correctness desc ---
    by_provider = sorted(
        rows,
        key=lambda r: (str(r.get("model", "")).split(":")[0], -float(r.get("correctness", 0.0))),
    )
    provider_rows = _format_table(by_provider, _HEADERS)
    if provider_rows:
        lines.append(
            tabulate(provider_rows, headers=_HEADERS, tablefmt="github", colalign=_COLALIGN)
        )
    else:
        lines.append("_No eval artifacts found._")

    # --- Table 2: ranked by correctness desc, then solve_rate desc ---
    by_correctness = sorted(
        rows,
        key=lambda r: (-float(r.get("correctness", 0.0)), -float(r.get("solve_rate") or 0.0)),
    )
    lines.extend(["", "## Ranked by correctness / solve rate", ""])
    ranked_rows = _format_table(by_correctness, _HEADERS)
    if ranked_rows:
        lines.append(
            tabulate(ranked_rows, headers=_HEADERS, tablefmt="github", colalign=_COLALIGN)
        )
    else:
        lines.append("_No eval artifacts found._")

    # --- Table 3: per-category scores ---
    cat_table = _build_category_table(rows)
    if cat_table:
        lines.extend(["", "## Per-category correctness", ""])
        lines.extend(cat_table)

    report = "\n".join(lines)
    summary_file = os.environ.get("GITHUB_STEP_SUMMARY")
    if summary_file:
        Path(summary_file).write_text(report + "\n", encoding="utf-8")
    print(report)


if __name__ == "__main__":
    main()
================================================
FILE: .github/scripts/check_extras_sync.py
================================================
"""Check that optional extras stay in sync with required dependencies (openai).
When a package appears in both [project.dependencies] and
[project.optional-dependencies], we ensure their version constraints match.
This prevents silent version drift (e.g. bumping a required dep but
forgetting the corresponding extra).
"""
import sys
import tomllib
from pathlib import Path
from re import compile as re_compile
from re import sub as re_sub
# Matches the package name at the start of a PEP 508 dependency string.
# Handles both hyphenated and underscored names (PEP 503 normalizes these).
_NAME_RE = re_compile(r"^([A-Za-z0-9]([A-Za-z0-9._-]*[A-Za-z0-9])?)")
def _normalize(name: str) -> str:
"""PEP 503 normalize a package name for comparison.
Returns:
Lowercased, underscore-normalized package name.
"""
return name.lower().replace("-", "_").replace(".", "_")
def _parse_dep(dep: str) -> tuple[str, str]:
    """Return (normalized_name, version_spec) from a PEP 508 string.

    Returns:
        Tuple of normalized package name and version specifier.

    Raises:
        ValueError: If the dependency string cannot be parsed.
    """
    matched = _NAME_RE.match(dep)
    if matched is None:
        msg = f"Cannot parse dependency: {dep}"
        raise ValueError(msg)
    # Everything after the name (e.g. "==1.2.3", ">=2,<3") is the spec.
    version_spec = dep[matched.end() :].strip()
    return _normalize(matched.group(1)), version_spec
def main(pyproject_path: Path) -> int:
    """Check extras sync and return exit code (0 = pass, 1 = mismatch).

    Returns:
        0 if all extras match, 1 if there are mismatches.
    """
    with pyproject_path.open("rb") as fh:
        project = tomllib.load(fh).get("project", {})

    # Version spec of every required dependency, keyed by normalized name.
    required: dict[str, str] = dict(
        _parse_dep(dep) for dep in project.get("dependencies", [])
    )

    mismatches: list[str] = []
    for group, deps in project.get("optional-dependencies", {}).items():
        for dep in deps:
            name, spec = _parse_dep(dep)
            expected = required.get(name)
            # Only packages present in BOTH lists are checked.
            if expected is not None and spec != expected:
                mismatches.append(
                    f" [{group}] {name}: extra has '{spec}' "
                    f"but required dep has '{expected}'"
                )

    if mismatches:
        print("Extra / required dependency version mismatch:")
        print("\n".join(mismatches))
        print(
            "\nUpdate the optional extras in [project.optional-dependencies] "
            "to match [project.dependencies]."
        )
        return 1
    print("All extras are in sync with required dependencies.")
    return 0


if __name__ == "__main__":
    path = Path(sys.argv[1]) if len(sys.argv) > 1 else Path("pyproject.toml")
    raise SystemExit(main(path))
================================================
FILE: .github/scripts/check_version_equality.py
================================================
"""Check that pyproject.toml and _version.py versions stay in sync.
Prevents releases with mismatched version numbers across the SDK and CLI
packages. Used by the CI workflow in .github/workflows/check_versions.yml
and as a pre-commit hook.
"""
import re
import sys
import tomllib
from pathlib import Path
# (pyproject.toml, _version.py) relative path pairs, from the repo root, for
# each package whose version strings must stay in sync.
PACKAGES = [
    ("libs/deepagents/pyproject.toml", "libs/deepagents/deepagents/_version.py"),
    ("libs/cli/pyproject.toml", "libs/cli/deepagents_cli/_version.py"),
]
# Captures the quoted version from a line like: __version__ = "1.2.3"
_VERSION_RE = re.compile(r'^__version__\s*=\s*"([^"]+)"', re.MULTILINE)
def _get_pyproject_version(path: Path) -> str:
"""Extract version from pyproject.toml.
Args:
path: Path to pyproject.toml.
Returns:
Version string.
"""
with path.open("rb") as f:
data = tomllib.load(f)
try:
return data["project"]["version"]
except KeyError:
msg = f"Could not find project.version in {path}"
raise ValueError(msg) from None
def _get_version_py(path: Path) -> str:
    """Extract __version__ from _version.py.

    Args:
        path: Path to _version.py.

    Returns:
        Version string.

    Raises:
        ValueError: If __version__ is not found.
    """
    found = _VERSION_RE.search(path.read_text())
    if found is None:
        msg = f"Could not find __version__ in {path}"
        raise ValueError(msg)
    return found.group(1)
def main() -> int:
    """Check version equality across packages.

    Returns:
        0 if all versions match, 1 if there are mismatches.
    """
    # Script lives in .github/scripts/, so the repo root is two levels up.
    root = Path(__file__).resolve().parents[2]
    errors: list[str] = []
    for pyproject_rel, version_py_rel in PACKAGES:
        pyproject_path = root / pyproject_rel
        version_py_path = root / version_py_rel

        absent = [p for p in (pyproject_path, version_py_path) if not p.exists()]
        if absent:
            errors.append(
                f" {pyproject_rel.split('/')[1]}: file(s) not found: "
                + ", ".join(str(m) for m in absent)
            )
            continue

        expected = _get_pyproject_version(pyproject_path)
        actual = _get_version_py(version_py_path)
        if expected == actual:
            print(f"{pyproject_path.parent.name} versions match: {expected}")
        else:
            errors.append(
                f" {pyproject_path.parent.name}: pyproject.toml={expected}, "
                f"_version.py={actual}"
            )

    if errors:
        print("Version mismatch detected:")
        print("\n".join(errors))
        return 1
    return 0


if __name__ == "__main__":
    sys.exit(main())
================================================
FILE: .github/scripts/models.py
================================================
"""Unified model registry for eval and harbor GitHub Actions workflows.
Single source of truth for all model definitions. Each model is declared once
with tags encoding workflow and group membership.
Usage:
python .github/scripts/models.py eval # reads EVAL_MODELS env var
python .github/scripts/models.py harbor # reads HARBOR_MODELS env var
Env var values: a preset name (e.g. "all", "set0", "anthropic"), or
comma-separated "provider:model" specs.
"""
from __future__ import annotations
import json
import os
import re
import sys
from typing import NamedTuple
_SAFE_SPEC_RE = re.compile(r"^[a-zA-Z0-9:_\-./]+$")
"""Allowed characters in model specs: alphanumeric, colon, hyphen, underscore,
dot, slash.
Rejects shell metacharacters ($, `, ;, |, &, (, ), etc.).
"""
class Model(NamedTuple):
    """A model spec with group tags."""
    # "provider:model" string passed to workflows, e.g. "openai:gpt-4o".
    spec: str
    # Membership tags of the form "{workflow}:{group}", e.g. "eval:set0".
    groups: frozenset[str]
# ---------------------------------------------------------------------------
# Registry — canonical order determines output order within each preset.
# Tags follow the convention {workflow}:{group}.
# ---------------------------------------------------------------------------
REGISTRY: tuple[Model, ...] = (
# -- Anthropic --
Model(
"anthropic:claude-haiku-4-5-20251001",
frozenset({"eval:set0", "eval:set1"}),
),
Model(
"anthropic:claude-sonnet-4-20250514",
frozenset({"eval:set0", "harbor:anthropic"}),
),
Model(
"anthropic:claude-sonnet-4-5-20250929",
frozenset({"eval:set0", "harbor:anthropic"}),
),
Model(
"anthropic:claude-sonnet-4-6",
frozenset({"eval:set0", "eval:set1", "harbor:anthropic"}),
),
Model(
"anthropic:claude-opus-4-1",
frozenset({"eval:set0", "harbor:anthropic"}),
),
Model(
"anthropic:claude-opus-4-5-20251101",
frozenset({"eval:set0", "harbor:anthropic"}),
),
Model(
"anthropic:claude-opus-4-6",
frozenset({"eval:set0", "eval:set1", "harbor:anthropic"}),
),
# -- OpenAI --
Model("openai:gpt-4o", frozenset({"eval:set0"})),
Model("openai:gpt-4o-mini", frozenset({"eval:set0"})),
Model(
"openai:gpt-4.1",
frozenset({"eval:set0", "eval:set1", "harbor:openai"}),
),
Model("openai:o3", frozenset({"eval:set0", "harbor:openai"})),
Model("openai:o4-mini", frozenset({"eval:set0", "harbor:openai"})),
Model("openai:gpt-5.1-codex", frozenset({"eval:set0"})),
Model("openai:gpt-5.2-codex", frozenset({"eval:set0", "eval:set1"})),
Model(
"openai:gpt-5.4",
frozenset({"eval:set0", "eval:set1", "harbor:openai"}),
),
# -- Google --
Model("google_genai:gemini-2.5-flash", frozenset({"eval:set0"})),
Model("google_genai:gemini-2.5-pro", frozenset({"eval:set0", "eval:set1"})),
Model("google_genai:gemini-3-flash-preview", frozenset({"eval:set0"})),
Model(
"google_genai:gemini-3.1-pro-preview",
frozenset({"eval:set0", "eval:set1"}),
),
# -- OpenRouter --
Model(
"openrouter:minimax/minimax-m2.7",
frozenset({"eval:set0", "eval:open"}),
),
# -- Baseten --
Model(
"baseten:zai-org/GLM-5",
frozenset({"eval:set0", "eval:set1", "eval:open", "harbor:baseten"}),
),
Model(
"baseten:MiniMaxAI/MiniMax-M2.5",
frozenset({"eval:set0", "eval:set1", "harbor:baseten"}),
),
Model(
"baseten:moonshotai/Kimi-K2.5",
frozenset({"eval:set0", "harbor:baseten"}),
),
Model(
"baseten:deepseek-ai/DeepSeek-V3.2",
frozenset({"eval:set0", "harbor:baseten"}),
),
Model(
"baseten:Qwen/Qwen3-Coder-480B-A35B-Instruct",
frozenset({"eval:set0", "harbor:baseten"}),
),
# -- Fireworks --
Model(
"fireworks:fireworks/qwen3-vl-235b-a22b-thinking",
frozenset({"eval:set0", "eval:set1"}),
),
Model("fireworks:fireworks/deepseek-v3-0324", frozenset({"eval:set0"})),
Model("fireworks:fireworks/minimax-m2p1", frozenset({"eval:set0"})),
Model("fireworks:fireworks/kimi-k2p5", frozenset({"eval:set0"})),
Model("fireworks:fireworks/glm-5", frozenset({"eval:set0"})),
Model("fireworks:fireworks/minimax-m2p5", frozenset({"eval:set0"})),
# -- Ollama (SET1 + SET2) --
Model("ollama:glm-5", frozenset({"eval:set1", "eval:set2"})),
Model("ollama:minimax-m2.5", frozenset({"eval:set1", "eval:set2"})),
Model("ollama:qwen3.5:397b-cloud", frozenset({"eval:set1", "eval:set2"})),
# -- Groq (SET2) --
Model("groq:openai/gpt-oss-120b", frozenset({"eval:set2"})),
Model("groq:qwen/qwen3-32b", frozenset({"eval:set2"})),
Model("groq:moonshotai/kimi-k2-instruct", frozenset({"eval:set2"})),
# -- xAI (SET2) --
Model("xai:grok-4", frozenset({"eval:set2"})),
Model("xai:grok-3-mini-fast", frozenset({"eval:set2"})),
# -- Ollama (SET2 only) --
Model("ollama:nemotron-3-nano:30b", frozenset({"eval:set2"})),
Model("ollama:cogito-2.1:671b", frozenset({"eval:set2"})),
Model("ollama:devstral-2:123b", frozenset({"eval:set2"})),
Model("ollama:ministral-3:14b", frozenset({"eval:set2"})),
Model("ollama:qwen3-next:80b", frozenset({"eval:set2"})),
Model("ollama:qwen3-coder:480b-cloud", frozenset({"eval:set2"})),
Model("ollama:deepseek-v3.2:cloud", frozenset({"eval:set2"})),
# -- NVIDIA (OPEN) --
Model(
"nvidia:nvidia/nemotron-3-super-120b-a12b",
frozenset({"eval:open"}),
),
)
# ---------------------------------------------------------------------------
# Preset definitions — map preset names to tag filters per workflow.
# None means "any tag with the workflow prefix" (i.e. the "all" preset).
# ---------------------------------------------------------------------------
_EVAL_PRESETS: dict[str, str | None] = {
"all": None,
"set0": "eval:set0",
"set1": "eval:set1",
"set2": "eval:set2",
"open": "eval:open",
}
_HARBOR_PRESETS: dict[str, str | None] = {
"all": None,
"anthropic": "harbor:anthropic",
"openai": "harbor:openai",
"baseten": "harbor:baseten",
}
_WORKFLOW_CONFIG: dict[str, tuple[str, dict[str, str | None]]] = {
"eval": ("EVAL_MODELS", _EVAL_PRESETS),
"harbor": ("HARBOR_MODELS", _HARBOR_PRESETS),
}
def _filter_by_tag(prefix: str, tag: str | None) -> list[str]:
    """Return model specs matching a tag filter, in REGISTRY order."""
    if tag is None:
        # "all" preset: any model carrying at least one tag for this workflow.
        return [m.spec for m in REGISTRY if any(g.startswith(prefix) for g in m.groups)]
    return [m.spec for m in REGISTRY if tag in m.groups]
def _resolve_models(workflow: str, selection: str) -> list[str]:
    """Resolve a selection string to a list of model specs.

    Args:
        workflow: "eval" or "harbor".
        selection: A preset name, or comma-separated "provider:model" specs.

    Returns:
        Ordered list of model spec strings.

    Raises:
        ValueError: If the selection is empty or contains invalid specs.
    """
    env_var, presets = _WORKFLOW_CONFIG[workflow]
    stripped = selection.strip()

    # Preset names expand via the registry's tag filters.
    if stripped in presets:
        return _filter_by_tag(f"{workflow}:", presets[stripped])

    # Otherwise treat the value as an explicit comma-separated spec list.
    specs = [piece.strip() for piece in stripped.split(",") if piece.strip()]
    if not specs:
        raise ValueError(
            f"No models resolved from {env_var} (got empty or whitespace-only input)"
        )

    missing_provider = [s for s in specs if ":" not in s]
    if missing_provider:
        raise ValueError(
            "Invalid model spec(s) (expected 'provider:model'): "
            + ", ".join(repr(s) for s in missing_provider)
        )

    # Reject shell metacharacters so specs are safe to interpolate downstream.
    unsafe = [s for s in specs if not _SAFE_SPEC_RE.match(s)]
    if unsafe:
        raise ValueError(
            "Model spec(s) contain disallowed characters: "
            + ", ".join(repr(s) for s in unsafe)
        )
    return specs
def main() -> None:
    """Entry point — reads workflow arg and env var, writes matrix JSON.

    Raises:
        SystemExit: If the workflow argument is missing or unrecognized.
    """
    if len(sys.argv) != 2 or sys.argv[1] not in _WORKFLOW_CONFIG:  # noqa: PLR2004
        msg = f"Usage: {sys.argv[0]} {{{' | '.join(_WORKFLOW_CONFIG)}}}"
        raise SystemExit(msg)
    workflow = sys.argv[1]
    env_var, _ = _WORKFLOW_CONFIG[workflow]
    # Default to the "all" preset when the selection env var is unset.
    selection = os.environ.get(env_var, "all")
    models = _resolve_models(workflow, selection)
    matrix = {"model": models}
    github_output = os.environ.get("GITHUB_OUTPUT")
    # Compact separators keep the whole matrix on a single output line.
    line = f"matrix={json.dumps(matrix, separators=(',', ':'))}"
    if github_output:
        # Running under GitHub Actions: append to the step's output file.
        with open(github_output, "a") as f:  # noqa: PTH123
            f.write(line + "\n")
    else:
        # Local/debug run: print the same line to stdout.
        print(line)  # noqa: T201


if __name__ == "__main__":
    main()
================================================
FILE: .github/scripts/pr-labeler-config.json
================================================
{
"org": "langchain-ai",
"trustedThreshold": 5,
"labelColor": "b76e79",
"sizeThresholds": [
{
"label": "size: XS",
"max": 50
},
{
"label": "size: S",
"max": 200
},
{
"label": "size: M",
"max": 500
},
{
"label": "size: L",
"max": 1000
},
{
"label": "size: XL"
}
],
"excludedFiles": [
"uv.lock"
],
"excludedPaths": [
"docs/"
],
"typeToLabel": {
"feat": "feature",
"fix": "fix",
"docs": "documentation",
"hotfix": "hotfix",
"style": "linting",
"refactor": "refactor",
"perf": "performance",
"test": "tests",
"build": "infra",
"ci": "infra",
"chore": "infra",
"revert": "revert",
"release": "release",
"breaking": "breaking"
},
"scopeToLabel": {
"acp": "acp",
"ci": "infra",
"cli": "cli",
"cli-gha": "cli",
"daytona": "daytona",
"deepagents": "deepagents",
"deepagents-cli": "cli",
"deps": "dependencies",
"docs": "documentation",
"evals": "evals",
"examples": "examples",
"harbor": "evals",
"infra": "infra",
"sdk": "deepagents"
},
"fileRules": [
{
"label": "deepagents",
"prefix": "libs/deepagents/",
"skipExcludedFiles": true
},
{
"label": "cli",
"prefix": "libs/cli/",
"skipExcludedFiles": true
},
{
"label": "acp",
"prefix": "libs/acp/",
"skipExcludedFiles": true
},
{
"label": "evals",
"prefix": "libs/evals/",
"skipExcludedFiles": true
},
{
"label": "cli",
"exact": "action.yml"
},
{
"label": "github_actions",
"exact": "action.yml"
},
{
"label": "github_actions",
"prefix": ".github/workflows/"
},
{
"label": "github_actions",
"prefix": ".github/actions/"
},
{
"label": "dependencies",
"suffix": "pyproject.toml"
},
{
"label": "dependencies",
"exact": "uv.lock"
},
{
"label": "dependencies",
"pattern": "(?:^|/)requirements[^/]*\\.txt$"
}
]
}
================================================
FILE: .github/scripts/pr-labeler.js
================================================
// Shared helpers for pr_labeler.yml and tag-external-issues.yml.
//
// Usage from actions/github-script (requires actions/checkout first):
// const { h } = require('./.github/scripts/pr-labeler.js').loadAndInit(github, owner, repo, core);
const fs = require('fs');
const path = require('path');
// Read, parse, and validate pr-labeler-config.json from this script's directory.
function loadConfig() {
  const configPath = path.join(__dirname, 'pr-labeler-config.json');

  let raw;
  try {
    raw = fs.readFileSync(configPath, 'utf8');
  } catch (readErr) {
    throw new Error(`Failed to read ${configPath}: ${readErr.message}`);
  }

  let config;
  try {
    config = JSON.parse(raw);
  } catch (parseErr) {
    throw new Error(`Failed to parse pr-labeler-config.json: ${parseErr.message}`);
  }

  // Fail fast when any top-level key the labeler depends on is absent.
  const requiredKeys = [
    'labelColor', 'sizeThresholds', 'fileRules',
    'typeToLabel', 'scopeToLabel', 'trustedThreshold',
    'excludedFiles', 'excludedPaths',
  ];
  const absent = requiredKeys.filter(key => !(key in config));
  if (absent.length > 0) {
    throw new Error(`pr-labeler-config.json missing required keys: ${absent.join(', ')}`);
  }

  return config;
}
function init(github, owner, repo, config, core) {
if (!core) {
throw new Error('init() requires a `core` parameter (e.g., from actions/github-script)');
}
const {
trustedThreshold,
labelColor,
sizeThresholds,
scopeToLabel,
typeToLabel,
fileRules: fileRulesDef,
excludedFiles,
excludedPaths,
} = config;
const sizeLabels = sizeThresholds.map(t => t.label);
const allTypeLabels = [...new Set(Object.values(typeToLabel))];
const tierLabels = ['new-contributor', 'trusted-contributor'];
// ── Label management ──────────────────────────────────────────────
async function ensureLabel(name, color = labelColor) {
try {
await github.rest.issues.getLabel({ owner, repo, name });
} catch (e) {
if (e.status !== 404) throw e;
try {
await github.rest.issues.createLabel({ owner, repo, name, color });
} catch (createErr) {
// 422 = label created by a concurrent run between our get and create
if (createErr.status !== 422) throw createErr;
core.info(`Label "${name}" creation returned 422 (likely already exists)`);
}
}
}
// ── Size calculation ──────────────────────────────────────────────
function getSizeLabel(totalChanged) {
for (const t of sizeThresholds) {
if (t.max != null && totalChanged < t.max) return t.label;
}
// Last entry has no max — it's the catch-all (XL)
return sizeThresholds[sizeThresholds.length - 1].label;
}
function computeSize(files) {
const excluded = new Set(excludedFiles);
const totalChanged = files.reduce((sum, f) => {
const p = f.filename ?? '';
const base = p.split('/').pop();
if (excluded.has(base)) return sum;
for (const prefix of excludedPaths) {
if (p.startsWith(prefix)) return sum;
}
return sum + (f.additions ?? 0) + (f.deletions ?? 0);
}, 0);
return { totalChanged, sizeLabel: getSizeLabel(totalChanged) };
}
// ── File-based labels ─────────────────────────────────────────────
function buildFileRules() {
return fileRulesDef.map((rule, i) => {
let test;
if (rule.prefix) test = p => p.startsWith(rule.prefix);
else if (rule.suffix) test = p => p.endsWith(rule.suffix);
else if (rule.exact) test = p => p === rule.exact;
else if (rule.pattern) {
const re = new RegExp(rule.pattern);
test = p => re.test(p);
} else {
throw new Error(
`fileRules[${i}] (label: "${rule.label}") has no recognized matcher ` +
`(expected one of: prefix, suffix, exact, pattern)`
);
}
return { label: rule.label, test, skipExcluded: !!rule.skipExcludedFiles };
});
}
function matchFileLabels(files, fileRules) {
  // Return the set of labels whose rule matches at least one changed file.
  // `fileRules` may be passed in (e.g. precomputed by the caller); otherwise
  // the rules are compiled on demand.
  const rules = fileRules || buildFileRules();
  const excludedNames = new Set(excludedFiles);
  // skipExcluded rules ignore files whose basename is in the top-level
  // "excludedFiles" list (e.g. uv.lock) so lockfile-only changes don't
  // trigger package labels.
  const notExcluded = (file) =>
    !excludedNames.has((file.filename ?? '').split('/').pop());
  const matched = new Set();
  for (const rule of rules) {
    const candidates = rule.skipExcluded ? files.filter(notExcluded) : files;
    if (candidates.some((file) => rule.test(file.filename ?? ''))) {
      matched.add(rule.label);
    }
  }
  return matched;
}
// ── Title-based labels ────────────────────────────────────────────
function matchTitleLabels(title) {
  // Parse a conventional-commit style PR title: "type(scope1,scope2)!: ..."
  // and derive labels from the type, scopes, and breaking-change marker.
  const header = (title ?? '').match(/^(\w+)(?:\(([^)]+)\))?(!)?:/);
  if (!header) {
    // Not a conventional-commit title — nothing to label.
    return { labels: new Set(), type: null, typeLabel: null, scopes: [], breaking: false };
  }
  const [, rawType, rawScopes = '', bang] = header;
  const type = rawType.toLowerCase();
  const breaking = Boolean(bang);
  const typeLabel = typeToLabel[type] || null;
  const scopes = rawScopes.split(',').map((s) => s.trim()).filter(Boolean);
  const labels = new Set();
  if (typeLabel) labels.add(typeLabel);
  if (breaking) labels.add('breaking');
  for (const scope of scopes) {
    const scopeLabel = scopeToLabel[scope];
    if (scopeLabel) labels.add(scopeLabel);
  }
  return { labels, type, typeLabel, scopes, breaking };
}
// ── Org membership ────────────────────────────────────────────────
async function checkMembership(author, userType) {
  // Decide whether `author` is external to the langchain-ai org.
  // Bot accounts are treated as internal without an API call.
  if (userType === 'Bot') {
    console.log(`${author} is a Bot — treating as internal`);
    return { isExternal: false };
  }
  try {
    const { data } = await github.rest.orgs.getMembershipForUser({
      org: 'langchain-ai',
      username: author,
    });
    // Any non-active state (e.g. a pending invitation) counts as external.
    if (data.state !== 'active') {
      console.log(`${author} has pending membership — treating as external`);
      return { isExternal: true };
    }
    console.log(`${author} is an active member of langchain-ai`);
    return { isExternal: false };
  } catch (e) {
    // 404 = not a member at all.
    if (e.status === 404) {
      console.log(`${author} is not a member of langchain-ai`);
      return { isExternal: true };
    }
    // Non-404 errors (rate limit, auth failure, server error) must not
    // silently default to external — rethrow to fail the step.
    throw new Error(
      `Membership check failed for ${author} (${e.status}): ${e.message}`,
    );
  }
}
// ── Contributor analysis ──────────────────────────────────────────
async function getContributorInfo(contributorCache, author, userType) {
  // Memoized lookup of { isExternal, mergedCount } for an author, so a run
  // that sees the same author several times pays for the API calls once.
  if (contributorCache.has(author)) {
    return contributorCache.get(author);
  }
  const { isExternal } = await checkMembership(author, userType);
  // mergedCount stays null for internal authors, and for external authors
  // whose merged-PR count could not be determined.
  let mergedCount = null;
  if (isExternal) {
    try {
      const search = await github.rest.search.issuesAndPullRequests({
        q: `repo:${owner}/${repo} is:pr is:merged author:"${author}"`,
        per_page: 1,
      });
      mergedCount = search?.data?.total_count ?? null;
    } catch (e) {
      // A 422 (search query rejected) is tolerated — the tier is simply
      // skipped; anything else fails the step.
      if (e?.status !== 422) throw e;
      core.warning(`Search failed for ${author}; skipping tier.`);
    }
  }
  const info = { isExternal, mergedCount };
  contributorCache.set(author, info);
  return info;
}
// ── Tier label resolution ───────────────────────────────────────────
async function applyTierLabel(issueNumber, author, { skipNewContributor = false } = {}) {
  // Resolve and apply a contributor-tier label to the given issue/PR based
  // on the author's count of previously merged PRs in this repo:
  //   - 'trusted-contributor' when mergedCount >= trustedThreshold
  //   - 'new-contributor' when mergedCount === 0 (unless skipNewContributor)
  //
  // Returns the label that was applied, or null when none was (no tier
  // matched, or the merged-PR count could not be determined). Previously the
  // early-exit paths returned undefined while the normal path returned a
  // string or null; all "no label" outcomes now uniformly return null.
  let mergedCount;
  try {
    const result = await github.rest.search.issuesAndPullRequests({
      q: `repo:${owner}/${repo} is:pr is:merged author:"${author}"`,
      per_page: 1,
    });
    mergedCount = result?.data?.total_count;
  } catch (error) {
    // 422 = search query rejected; treat the count as unknown rather than
    // failing the whole labeling run. Anything else propagates.
    if (error?.status !== 422) throw error;
    core.warning(`Search failed for ${author}; skipping tier label.`);
    return null;
  }
  if (mergedCount == null) {
    core.warning(`Search response missing total_count for ${author}; skipping tier label.`);
    return null;
  }
  let tierLabel = null;
  if (mergedCount >= trustedThreshold) tierLabel = 'trusted-contributor';
  else if (mergedCount === 0 && !skipNewContributor) tierLabel = 'new-contributor';
  if (tierLabel) {
    // Make sure the label exists in the repo before attaching it.
    await ensureLabel(tierLabel);
    await github.rest.issues.addLabels({
      owner, repo, issue_number: issueNumber, labels: [tierLabel],
    });
    console.log(`Applied '${tierLabel}' to #${issueNumber} (${mergedCount} merged PRs)`);
  } else {
    console.log(`No tier label for ${author} (${mergedCount} merged PRs)`);
  }
  return tierLabel;
}
return {
ensureLabel,
getSizeLabel,
computeSize,
buildFileRules,
matchFileLabels,
matchTitleLabels,
allTypeLabels,
checkMembership,
getContributorInfo,
applyTierLabel,
sizeLabels,
tierLabels,
trustedThreshold,
labelColor,
};
}
function loadAndInit(github, owner, repo, core) {
  // Convenience wrapper: read the labeler configuration from disk and build
  // the helper bundle in a single call.
  const config = loadConfig();
  const helpers = init(github, owner, repo, config, core);
  return { config, h: helpers };
}
module.exports = { loadConfig, init, loadAndInit };
================================================
FILE: .github/workflows/_benchmark.yml
================================================
# Reusable workflow: CodSpeed wall-time benchmarks
#
# Runs pytest-benchmark tests under CodSpeed instrumentation so that
# regressions are tracked across commits on the CodSpeed dashboard.
#
# Authenticates via OpenID Connect (OIDC) — no repository secret required.
name: "Benchmark"
on:
  workflow_call:
    inputs:
      working-directory:
        description: "Package directory (e.g. libs/deepagents)"
        required: true
        type: string
      python-version:
        description: "Python version"
        required: false
        type: string
        # Pin 3.13.11 — CodSpeed walltime segfaults on 3.13.12+
        # https://github.com/CodSpeedHQ/pytest-codspeed/issues/106
        default: "3.13.11"
env:
  # The benchmark step runs `uv run --no-sync`; UV_NO_SYNC keeps other uv
  # invocations from re-syncing the environment mid-job.
  UV_NO_SYNC: "true"
jobs:
  benchmark:
    name: "CodSpeed"
    runs-on: ubuntu-latest
    permissions:
      contents: read
      # id-token: write is required for the OIDC authentication mentioned above.
      id-token: write
    defaults:
      run:
        working-directory: ${{ inputs.working-directory }}
    steps:
      - name: "Checkout"
        uses: actions/checkout@v6
      - name: "Set up Python + uv"
        uses: ./.github/actions/uv_setup
        with:
          python-version: ${{ inputs.python-version }}
          working-directory: ${{ inputs.working-directory }}
          enable-cache: "true"
          cache-suffix: benchmark-${{ inputs.python-version }}
      - name: "Install dependencies"
        run: uv sync --group test
      # Only tests marked `benchmark` are collected and run under CodSpeed.
      - name: "Run benchmarks"
        uses: CodSpeedHQ/action@v4
        with:
          working-directory: ${{ inputs.working-directory }}
          run: uv run --no-sync pytest ./tests -m benchmark --codspeed
          mode: walltime
================================================
FILE: .github/workflows/_lint.yml
================================================
# Reusable workflow for running linting
name: "🧹 Linting"
on:
  workflow_call:
    inputs:
      working-directory:
        required: true
        type: string
        description: "From which folder this pipeline executes"
      python-version:
        required: true
        type: string
        description: "Python version to use"
permissions:
  contents: read
env:
  # Fall back to the repo root when no working directory is supplied.
  WORKDIR: ${{ inputs.working-directory == '' && '.' || inputs.working-directory }}
  # "github" output format makes ruff findings show up as PR annotations.
  RUFF_OUTPUT_FORMAT: github
  LINT: minimal
  # Fail instead of silently re-resolving when uv.lock is out of date.
  UV_FROZEN: "true"
jobs:
  build:
    name: "Python ${{ inputs.python-version }}"
    runs-on: ubuntu-latest
    timeout-minutes: 20
    steps:
      - name: "📋 Checkout Code"
        uses: actions/checkout@v6
      - name: "🐍 Set up Python ${{ inputs.python-version }} + UV"
        uses: "./.github/actions/uv_setup"
        with:
          python-version: ${{ inputs.python-version }}
          cache-suffix: lint-${{ inputs.working-directory }}
          working-directory: ${{ inputs.working-directory }}
      - name: "📦 Install Dependencies"
        working-directory: ${{ inputs.working-directory }}
        run: |
          uv sync --group test
      - name: "🔍 Run Linters"
        working-directory: ${{ inputs.working-directory }}
        run: |
          make lint
================================================
FILE: .github/workflows/_test.yml
================================================
# Reusable workflow for running unit tests
name: "🧪 Unit Testing"
on:
  workflow_call:
    inputs:
      working-directory:
        required: true
        type: string
        description: "From which folder this pipeline executes"
      python-version:
        required: true
        type: string
        description: "Python version to use"
      coverage:
        required: false
        type: boolean
        default: true
        description: "Collect coverage (disable to speed up non-primary matrix legs)"
permissions:
  contents: read
env:
  # uv run must not re-sync, and the lockfile must be up to date.
  UV_NO_SYNC: "true"
  UV_FROZEN: "true"
jobs:
  build:
    defaults:
      run:
        working-directory: ${{ inputs.working-directory }}
    runs-on: ubuntu-latest
    timeout-minutes: 20
    name: "Python ${{ inputs.python-version }}"
    steps:
      - name: "📋 Checkout Code"
        uses: actions/checkout@v6
      - name: "🐍 Set up Python ${{ inputs.python-version }} + UV"
        uses: "./.github/actions/uv_setup"
        id: setup-python
        with:
          python-version: ${{ inputs.python-version }}
          cache-suffix: test-${{ inputs.working-directory }}
          working-directory: ${{ inputs.working-directory }}
      - name: "📦 Install Test Dependencies"
        shell: bash
        run: uv sync --group test
      # `coverage: false` drops the coverage args for faster non-primary runs.
      - name: "🧪 Run Unit Tests"
        shell: bash
        env:
          RUN_SANDBOX_TESTS: "true"
        run: |
          if [ "${{ inputs.coverage }}" = "false" ]; then
            make test COV_ARGS= PYTEST_EXTRA=-q
          else
            make test PYTEST_EXTRA=-q
          fi
      # Catch tests that mutate checked-in files: fail unless git reports a
      # clean working tree.
      - name: "🧹 Verify Clean Working Directory"
        shell: bash
        run: |
          set -eu
          STATUS="$(git status)"
          echo "$STATUS"
          echo "$STATUS" | grep 'nothing to commit, working tree clean'
================================================
FILE: .github/workflows/auto-label-by-package.yml
================================================
# Keeps issue package labels (deepagents / cli) in sync with the "### Area"
# section of the issue-form body, on both open and edit.
name: Auto Label Issues by Package
on:
  issues:
    types: [opened, edited]
jobs:
  label-by-package:
    permissions:
      issues: write
    runs-on: ubuntu-latest
    steps:
      # The script parses the Area section (dropdown or checkbox format),
      # maps selections to labels, then adds/removes only the managed
      # package labels so other labels are left untouched.
      - name: Sync package labels
        uses: actions/github-script@v8
        with:
          script: |
            const body = context.payload.issue.body || "";
            // Extract text under "### Area" (handles " (Required)" suffix and being last section)
            const match = body.match(/### Area[^\n]*\n([\s\S]*?)(?:\n###|$)/i);
            if (!match) return;
            const packageSection = match[1].trim();
            // Mapping table for package names to labels
            const mapping = {
              "deepagents (SDK)": "deepagents",
              "cli": "cli",
            };
            // All possible package labels we manage
            const allPackageLabels = Object.values(mapping);
            const selectedLabels = [];
            // Check if this is checkbox format (multiple selection)
            const checkboxMatches = packageSection.match(/- \[x\]\s+([^\n\r]+)/gi);
            if (checkboxMatches) {
              // Handle checkbox format
              for (const match of checkboxMatches) {
                const packageName = match.replace(/- \[x\]\s+/i, '').trim();
                const label = mapping[packageName];
                if (label && !selectedLabels.includes(label)) {
                  selectedLabels.push(label);
                }
              }
            } else {
              // Handle dropdown format (single selection)
              const label = mapping[packageSection];
              if (label) {
                selectedLabels.push(label);
              }
            }
            // Get current issue labels
            const issue = await github.rest.issues.get({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number
            });
            const currentLabels = issue.data.labels.map(label => label.name);
            const currentPackageLabels = currentLabels.filter(label => allPackageLabels.includes(label));
            // Determine labels to add and remove
            const labelsToAdd = selectedLabels.filter(label => !currentPackageLabels.includes(label));
            const labelsToRemove = currentPackageLabels.filter(label => !selectedLabels.includes(label));
            // Add new labels
            if (labelsToAdd.length > 0) {
              await github.rest.issues.addLabels({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.issue.number,
                labels: labelsToAdd
              });
            }
            // Remove old labels
            for (const label of labelsToRemove) {
              await github.rest.issues.removeLabel({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.issue.number,
                name: label
              });
            }
================================================
FILE: .github/workflows/check_extras_sync.yml
================================================
# Ensures optional extras stay in sync with required dependencies.
#
# When a package appears in both [project.dependencies] and
# [project.optional-dependencies], the version constraints must match.
# Only runs when pyproject.toml is modified.
name: "🔍 Check Extras Sync"
on:
  pull_request:
    paths:
      - "libs/cli/pyproject.toml"
  push:
    branches: [main]
    paths:
      - "libs/cli/pyproject.toml"
# One run per ref; new pushes cancel the in-flight run.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
permissions:
  contents: read
jobs:
  check-extras-sync:
    name: "Verify extras match required deps"
    runs-on: ubuntu-latest
    timeout-minutes: 2
    steps:
      - name: "📋 Checkout Code"
        uses: actions/checkout@v6
      - name: "🐍 Set up Python and uv"
        uses: "./.github/actions/uv_setup"
        with:
          python-version: "3.14"
          enable-cache: "false"
      - name: "🔍 Check extras sync"
        run: python .github/scripts/check_extras_sync.py libs/cli/pyproject.toml
================================================
FILE: .github/workflows/check_lockfiles.yml
================================================
# Check that all uv.lock files are up-to-date
#
# Prevents PRs from being merged when lockfiles are out of sync with pyproject.toml
name: "🔒 Check Lockfiles"
on:
  push:
    branches: [main]
  pull_request:
  merge_group:
# One run per ref; new pushes cancel the in-flight run.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
permissions:
  contents: read
jobs:
  check-lockfiles:
    name: "Verify uv.lock files"
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - name: "📋 Checkout Code"
        uses: actions/checkout@v6
      - name: "🐍 Set up Python and uv"
        uses: "./.github/actions/uv_setup"
        with:
          python-version: "3.14"
      # Delegates the per-package lockfile verification to the Makefile.
      - name: "🔍 Check all lockfiles"
        run: make lock-check
================================================
FILE: .github/workflows/check_sdk_pin.yml
================================================
# Advisory check: posts a comment on CLI release PRs when the deepagents SDK
# pin drifts from the actual SDK version. Does not block merge — the release
# workflow enforces the pin at publish time. Removes the comment once resolved.
# See also: release.yml "Verify CLI pins latest SDK version" step (hard gate).
name: "🔗 Check SDK Pin"
on:
  pull_request:
    paths:
      - "libs/deepagents/pyproject.toml"
      - "libs/cli/pyproject.toml"
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
permissions:
  contents: read
  # Needed to create/update/delete the advisory PR comment.
  pull-requests: write
jobs:
  check-sdk-pin:
    # Only CLI release-please PRs are relevant to this check.
    if: startsWith(github.head_ref, 'release-please--branches--main--components--deepagents-cli')
    runs-on: ubuntu-latest
    timeout-minutes: 2
    steps:
      - uses: actions/checkout@v6
      # Extract both versions with sed and expose them (plus a match flag)
      # as step outputs for the comment-management step below.
      - name: Compare SDK version to CLI pin
        id: check
        run: |
          SDK_VERSION=$(sed -nE 's/^version = "([0-9]+\.[0-9]+\.[0-9]+)".*/\1/p' libs/deepagents/pyproject.toml | head -1)
          if [[ -z "$SDK_VERSION" ]]; then
            echo "::error file=libs/deepagents/pyproject.toml::Failed to extract SDK version. Expected a line matching: version = \"X.Y.Z\""
            exit 1
          fi
          CLI_SDK_PIN=$(sed -nE 's/.*deepagents==([0-9]+\.[0-9]+\.[0-9]+).*/\1/p' libs/cli/pyproject.toml | head -1)
          if [[ -z "$CLI_SDK_PIN" ]]; then
            echo "::error file=libs/cli/pyproject.toml::Failed to extract CLI SDK pin. Expected a dependency matching: deepagents==X.Y.Z"
            exit 1
          fi
          echo "sdk_version=$SDK_VERSION" >> "$GITHUB_OUTPUT"
          echo "cli_pin=$CLI_SDK_PIN" >> "$GITHUB_OUTPUT"
          echo "match=$( [ "$SDK_VERSION" = "$CLI_SDK_PIN" ] && echo true || echo false )" >> "$GITHUB_OUTPUT"
      - name: Manage PR comment
        uses: actions/github-script@v8
        env:
          SDK_VERSION: ${{ steps.check.outputs.sdk_version }}
          CLI_PIN: ${{ steps.check.outputs.cli_pin }}
          PIN_MATCH: ${{ steps.check.outputs.match }}
        with:
          script: |
            // Hidden HTML marker to identify comments posted by this workflow.
            const marker = '<!-- sdk-pin-check -->';
            const { owner, repo } = context.repo;
            const prNumber = context.payload.pull_request.number;
            const comments = await github.paginate(
              github.rest.issues.listComments,
              { owner, repo, issue_number: prNumber, per_page: 100 },
            );
            const existing = comments.find(c => c.body?.includes(marker));
            const match = process.env.PIN_MATCH === 'true';
            const sdkVersion = process.env.SDK_VERSION;
            const cliPin = process.env.CLI_PIN;
            if (!sdkVersion || !cliPin) {
              core.setFailed(
                `Version extraction returned empty values. SDK: "${sdkVersion}", CLI pin: "${cliPin}". ` +
                'Check that libs/deepagents/pyproject.toml and libs/cli/pyproject.toml have the expected format.'
              );
              return;
            }
            if (match && existing) {
              try {
                await github.rest.issues.deleteComment({
                  owner, repo,
                  comment_id: existing.id,
                });
                core.info('Pin matches — removed stale warning comment.');
              } catch (error) {
                // 404 = comment was already deleted (concurrent run or manual removal)
                if (error.status === 404) {
                  core.info('Stale comment already deleted.');
                } else {
                  core.warning(
                    `Failed to delete stale SDK pin warning comment (${error.status}): ${error.message}. ` +
                    'The outdated warning may still be visible on the PR.'
                  );
                }
              }
            } else if (match) {
              core.info(`SDK pin matches: deepagents==${sdkVersion}. No action needed.`);
            } else {
              const body = [
                marker,
                '> [!WARNING]',
                '> **SDK pin mismatch** — the CLI release workflow will fail at the "Verify CLI pins latest SDK version" step until this is resolved.',
                '>',
                '> | | Version |',
                '> |---|---|',
                `> | SDK (\`libs/deepagents/pyproject.toml\`) | \`${sdkVersion}\` |`,
                `> | CLI pin (\`libs/cli/pyproject.toml\`) | \`${cliPin}\` |`,
                '>',
                `> **To fix:** update \`libs/cli/pyproject.toml\` to pin \`deepagents==${sdkVersion}\`, then run \`cd libs/cli && uv lock\` and commit the lockfile update.`,
                '>',
                '> **To bypass:** if you intentionally need to pin an older SDK version, re-run the release workflow with `dangerous-skip-sdk-pin-check` enabled. Ensure the CLI does not contain any code that depends on functionality introduced in the newer SDK version — otherwise the published CLI will fail at runtime.',
                '>',
                '> See [`.github/RELEASING.md`](https://github.com/langchain-ai/deepagents/blob/main/.github/RELEASING.md#release-failed-cli-sdk-pin-mismatch) for the full recovery procedure.',
              ].join('\n');
              try {
                // Update silently (no workflow annotation) to avoid repeated warnings on re-pushes.
                if (existing) {
                  await github.rest.issues.updateComment({
                    owner, repo,
                    comment_id: existing.id,
                    body,
                  });
                  core.info('Updated existing warning comment.');
                } else {
                  await github.rest.issues.createComment({
                    owner, repo,
                    issue_number: prNumber,
                    body,
                  });
                }
              } catch (error) {
                core.warning(
                  `Could not post/update PR comment (status ${error.status}): ${error.message}. ` +
                  `The mismatch still exists: CLI pins deepagents==${cliPin} but SDK is ${sdkVersion}.`
                );
              }
              // Always emit annotation regardless of comment success.
              core.warning(`CLI pins deepagents==${cliPin} but SDK is ${sdkVersion}`);
            }
================================================
FILE: .github/workflows/check_versions.yml
================================================
# Ensures version numbers in pyproject.toml and _version.py stay in sync.
#
# (Prevents releases with mismatched version numbers)
name: "🔍 Check Version Equality"
on:
  pull_request:
    # Only the files that carry version numbers trigger this check.
    paths:
      - "libs/deepagents/pyproject.toml"
      - "libs/deepagents/deepagents/_version.py"
      - "libs/cli/pyproject.toml"
      - "libs/cli/deepagents_cli/_version.py"
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
permissions:
  contents: read
jobs:
  check_version_equality:
    runs-on: ubuntu-latest
    timeout-minutes: 2
    steps:
      - uses: actions/checkout@v6
      - name: "🐍 Set up Python and uv"
        uses: "./.github/actions/uv_setup"
        with:
          python-version: "3.14"
          enable-cache: "false"
      - name: "✅ Verify pyproject.toml & _version.py Match"
        run: python .github/scripts/check_version_equality.py
================================================
FILE: .github/workflows/ci.yml
================================================
# Main CI workflow for Deep Agents monorepo
#
# Runs on every pull request:
#   - Linting for changed packages
#   - Unit Tests for changed packages
#
# Only packages with changes are tested. SDK changes also trigger CLI tests.
# Pushes to main and workflow changes run full CI.
name: "🔧 CI"
on:
  push:
    branches: [main]
  pull_request:
  merge_group:
# Cancel redundant workflow runs
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
permissions:
  contents: read
  # Required for CodSpeed OIDC authentication in _benchmark.yml
  id-token: write
env:
  UV_NO_SYNC: "true"
jobs:
  # Detect which packages have changes
  changes:
    name: "🔍 Detect Changes"
    runs-on: ubuntu-latest
    outputs:
      deepagents: ${{ steps.filter.outputs.deepagents }}
      cli: ${{ steps.filter.outputs.cli }}
      evals: ${{ steps.filter.outputs.evals }}
      daytona: ${{ steps.filter.outputs.daytona }}
      modal: ${{ steps.filter.outputs.modal }}
      runloop: ${{ steps.filter.outputs.runloop }}
      quickjs: ${{ steps.filter.outputs.quickjs }}
    steps:
      - name: "📋 Checkout Code"
        uses: actions/checkout@v6
        with:
          fetch-depth: 0
      - name: "🔍 Check for changes"
        uses: dorny/paths-filter@v4
        id: filter
        with:
          # Each package filter includes workflow/action paths so that CI
          # infrastructure changes are validated against all packages.
          #
          # NOTE: Do NOT add negation patterns (e.g. '!libs/foo/**/*.md')
          # here. dorny/paths-filter evaluates patterns with OR logic, so a
          # negation like '!libs/deepagents/**/*.md' becomes "match anything
          # NOT in that glob" — causing unrelated files (e.g. .github/
          # templates) to match every filter and trigger full CI.
          # See: https://github.com/dorny/paths-filter/issues/97
          filters: |
            deepagents:
              - 'libs/deepagents/**'
              - '.github/workflows/ci.yml'
              - '.github/workflows/_lint.yml'
              - '.github/workflows/_test.yml'
              - '.github/actions/**'
            cli:
              - 'libs/cli/**'
              - 'libs/deepagents/**'
              - '.github/workflows/ci.yml'
              - '.github/workflows/_lint.yml'
              - '.github/workflows/_test.yml'
              - '.github/workflows/_benchmark.yml'
              - '.github/actions/**'
            evals:
              - 'libs/evals/**'
              - '.github/workflows/ci.yml'
              - '.github/workflows/_lint.yml'
              - '.github/workflows/_test.yml'
              - '.github/actions/**'
            daytona:
              - 'libs/partners/daytona/**'
              - '.github/workflows/ci.yml'
              - '.github/workflows/_lint.yml'
              - '.github/workflows/_test.yml'
              - '.github/actions/**'
            modal:
              - 'libs/partners/modal/**'
              - '.github/workflows/ci.yml'
              - '.github/workflows/_lint.yml'
              - '.github/workflows/_test.yml'
              - '.github/actions/**'
            runloop:
              - 'libs/partners/runloop/**'
              - '.github/workflows/ci.yml'
              - '.github/workflows/_lint.yml'
              - '.github/workflows/_test.yml'
              - '.github/actions/**'
            quickjs:
              - 'libs/partners/quickjs/**'
              - '.github/workflows/ci.yml'
              - '.github/workflows/_lint.yml'
              - '.github/workflows/_test.yml'
              - '.github/actions/**'
  # Run linting on changed packages
  lint-deepagents:
    name: "🧹 Lint deepagents"
    needs: changes
    if: needs.changes.outputs.deepagents == 'true' || github.event_name == 'push'
    uses: ./.github/workflows/_lint.yml
    with:
      working-directory: "libs/deepagents"
      python-version: "3.11"
  lint-cli:
    name: "🧹 Lint cli"
    needs: changes
    if: needs.changes.outputs.cli == 'true' || github.event_name == 'push'
    uses: ./.github/workflows/_lint.yml
    with:
      working-directory: "libs/cli"
      python-version: "3.11"
  lint-evals:
    name: "🧹 Lint evals"
    needs: changes
    if: needs.changes.outputs.evals == 'true' || github.event_name == 'push'
    uses: ./.github/workflows/_lint.yml
    with:
      working-directory: "libs/evals"
      python-version: "3.14"
  lint-daytona:
    name: "🧹 Lint daytona"
    needs: changes
    if: needs.changes.outputs.daytona == 'true' || github.event_name == 'push'
    uses: ./.github/workflows/_lint.yml
    with:
      working-directory: "libs/partners/daytona"
      python-version: "3.11"
  lint-modal:
    name: "🧹 Lint modal"
    needs: changes
    if: needs.changes.outputs.modal == 'true' || github.event_name == 'push'
    uses: ./.github/workflows/_lint.yml
    with:
      working-directory: "libs/partners/modal"
      python-version: "3.11"
  lint-runloop:
    name: "🧹 Lint runloop"
    needs: changes
    if: needs.changes.outputs.runloop == 'true' || github.event_name == 'push'
    uses: ./.github/workflows/_lint.yml
    with:
      working-directory: "libs/partners/runloop"
      python-version: "3.11"
  lint-quickjs:
    name: "🧹 Lint quickjs"
    needs: changes
    if: needs.changes.outputs.quickjs == 'true' || github.event_name == 'push'
    uses: ./.github/workflows/_lint.yml
    with:
      working-directory: "libs/partners/quickjs"
      python-version: "3.11"
  # Run unit tests on changed packages.
  # Coverage is only collected on the 3.12 leg to keep the other legs fast.
  test-deepagents:
    name: "🧪 Test deepagents"
    needs: changes
    if: needs.changes.outputs.deepagents == 'true' || github.event_name == 'push'
    strategy:
      matrix:
        python-version: ["3.11", "3.12", "3.13", "3.14"]
      fail-fast: false
    uses: ./.github/workflows/_test.yml
    with:
      working-directory: "libs/deepagents"
      python-version: ${{ matrix.python-version }}
      coverage: ${{ matrix.python-version == '3.12' }}
  test-cli:
    name: "🧪 Test cli"
    needs: changes
    if: needs.changes.outputs.cli == 'true' || github.event_name == 'push'
    strategy:
      matrix:
        python-version: ["3.11", "3.12", "3.13", "3.14"]
      fail-fast: false
    uses: ./.github/workflows/_test.yml
    with:
      working-directory: "libs/cli"
      python-version: ${{ matrix.python-version }}
      coverage: ${{ matrix.python-version == '3.12' }}
  test-evals:
    name: "🧪 Test evals"
    needs: changes
    if: needs.changes.outputs.evals == 'true' || github.event_name == 'push'
    strategy:
      matrix:
        python-version: ["3.12", "3.13", "3.14"]
      fail-fast: false
    uses: ./.github/workflows/_test.yml
    with:
      working-directory: "libs/evals"
      python-version: ${{ matrix.python-version }}
      coverage: ${{ matrix.python-version == '3.12' }}
  test-daytona:
    name: "🧪 Test daytona"
    needs: changes
    if: needs.changes.outputs.daytona == 'true' || github.event_name == 'push'
    strategy:
      matrix:
        python-version: ["3.11", "3.12", "3.13", "3.14"]
      fail-fast: false
    uses: ./.github/workflows/_test.yml
    with:
      working-directory: "libs/partners/daytona"
      python-version: ${{ matrix.python-version }}
      coverage: ${{ matrix.python-version == '3.12' }}
  test-modal:
    name: "🧪 Test modal"
    needs: changes
    if: needs.changes.outputs.modal == 'true' || github.event_name == 'push'
    strategy:
      matrix:
        python-version: ["3.11", "3.12", "3.13", "3.14"]
      fail-fast: false
    uses: ./.github/workflows/_test.yml
    with:
      working-directory: "libs/partners/modal"
      python-version: ${{ matrix.python-version }}
      coverage: ${{ matrix.python-version == '3.12' }}
  test-runloop:
    name: "🧪 Test runloop"
    needs: changes
    if: needs.changes.outputs.runloop == 'true' || github.event_name == 'push'
    strategy:
      matrix:
        python-version: ["3.11", "3.12", "3.13", "3.14"]
      fail-fast: false
    uses: ./.github/workflows/_test.yml
    with:
      working-directory: "libs/partners/runloop"
      python-version: ${{ matrix.python-version }}
      coverage: ${{ matrix.python-version == '3.12' }}
  # Run CodSpeed benchmarks on SDK changes
  benchmark-deepagents:
    name: "⏱️ Benchmark deepagents"
    needs: changes
    # TODO: re-enable once CodSpeed integration is ready
    #if: needs.changes.outputs.deepagents == 'true' || github.event_name == 'push'
    if: false
    uses: ./.github/workflows/_benchmark.yml
    with:
      working-directory: "libs/deepagents"
    secrets: inherit
  # Run CodSpeed benchmarks on CLI changes
  benchmark-cli:
    name: "⏱️ Benchmark cli"
    needs: changes
    if: needs.changes.outputs.cli == 'true' || github.event_name == 'push'
    uses: ./.github/workflows/_benchmark.yml
    with:
      working-directory: "libs/cli"
    secrets: inherit
  # Final status check - ensures all jobs passed.
  # `if: always()` makes this run even when upstream jobs were skipped, so a
  # single required status check can gate merges.
  ci_success:
    name: "✅ CI Success"
    needs:
      - changes
      - lint-deepagents
      - lint-cli
      - lint-evals
      - lint-daytona
      - lint-modal
      - lint-runloop
      - lint-quickjs
      - test-deepagents
      - test-cli
      - test-evals
      - test-daytona
      - test-modal
      - test-runloop
      - benchmark-deepagents
      - benchmark-cli
    if: always()
    runs-on: ubuntu-latest
    steps:
      - name: "🎉 All Checks Passed"
        run: |
          # Collect the result of every job in 'needs'. Note this INCLUDES
          # 'changes' (needs.*.result covers all listed needs); that's fine
          # because 'changes' always succeeds and never trips the check below.
          results='${{ toJSON(needs.*.result) }}'
          echo "Job results: $results"
          # Check for failures or cancellations
          if echo "$results" | grep -qE '"failure"|"cancelled"'; then
            echo "Some jobs failed or were cancelled"
            exit 1
          fi
          echo "All required checks passed (skipped jobs are OK)"
          exit 0
================================================
FILE: .github/workflows/deepagents-example.yml
================================================
name: Deep Agents Example
on:
issue_comment:
types: [created]
pull_request_review_comment:
types: [created]
workflow_dispatch:
inputs:
prompt:
description: "Prompt for the agent"
required: true
# Cancel superseded runs when @deepagents is mentioned multiple times on the same PR/issue
concurrency:
group: ${{ github.workflow }}-${{ github.event.issue.number || github.event.pull_request.number || github.run_id }}
cancel-in-progress: true
jobs:
deepagents:
if: |
github.event_name == 'workflow_dispatch' ||
(
contains(github.event.comment.body, '@deepagents') &&
(
github.event.comment.author_association == 'OWNER' ||
github.event.comment.author_association == 'MEMBER' ||
github.event.comment.author_association == 'COLLABORATOR'
) &&
(
github.event_name == 'pull_request_review_comment' ||
github.event.issue.pull_request
)
)
runs-on: ubuntu-latest
permissions:
contents: write
issues: write
pull-requests: write
steps:
- name: Resolve PR number
if: github.event_name != 'workflow_dispatch'
id: pr-info
shell: bash
env:
GH_TOKEN: ${{ github.token }}
# issue_comment uses event.issue.number; review_comment uses event.pull_request.number
PR_NUMBER: ${{ github.event.issue.number || github.event.pull_request.number }}
run: |
echo "number=$PR_NUMBER" >> "$GITHUB_OUTPUT"
- name: Acknowledge trigger
if: github.event_name != 'workflow_dispatch'
continue-on-error: true
shell: bash
env:
GH_TOKEN: ${{ github.token }}
COMMENT_ID: ${{ github.event.comment.id }}
EVENT_NAME: ${{ github.event_name }}
REPO: ${{ github.repository }}
run: |
# issue_comment reactions use issues/comments; review_comment uses pulls/comments
if [ "$EVENT_NAME" = "pull_request_review_comment" ]; then
API_PATH="repos/${REPO}/pulls/comments/${COMMENT_ID}/reactions"
else
API_PATH="repos/${REPO}/issues/comments/${COMMENT_ID}/reactions"
fi
if ! gh api --method POST "$API_PATH" -f content='rocket'; then
echo "::warning::Failed to add reaction to comment ${COMMENT_ID} — comment may have been deleted or token may lack permissions"
fi
- name: Get PR head SHA
if: github.event_name != 'workflow_dispatch'
id: pr-sha
shell: bash
env:
GH_TOKEN: ${{ github.token }}
PR_NUMBER: ${{ steps.pr-info.outputs.number }}
run: |
PR_DATA=$(gh pr view "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --json headRefOid,headRefName)
PR_SHA=$(echo "$PR_DATA" | jq -r '.headRefOid')
PR_BRANCH=$(echo "$PR_DATA" | jq -r '.headRefName')
if [ -z "$PR_SHA" ] || [ "$PR_SHA" = "null" ] || [ -z "$PR_BRANCH" ] || [ "$PR_BRANCH" = "null" ]; then
echo "::error::Failed to resolve PR head for #${PR_NUMBER}. API response: ${PR_DATA}"
exit 1
fi
echo "sha=$PR_SHA" >> "$GITHUB_OUTPUT"
echo "branch=$PR_BRANCH" >> "$GITHUB_OUTPUT"
- uses: actions/checkout@v6
with:
# Use the PR branch name so the agent can commit and push to the PR directly.
ref: ${{ github.event_name != 'workflow_dispatch' && steps.pr-sha.outputs.branch || '' }}
- name: Build PR context prompt
if: github.event_name != 'workflow_dispatch'
id: build-prompt
shell: bash
env:
GH_TOKEN: ${{ github.token }}
TRIGGER_COMMENT_BODY: ${{ github.event.comment.body }}
TRIGGER_COMMENT_AUTHOR: ${{ github.event.comment.user.login }}
PR_NUMBER: ${{ steps.pr-info.outputs.number }}
run: |
PROMPT_FILE=$(mktemp)
GH_STDERR=$(mktemp)
trap 'rm -f "$PROMPT_FILE" "$GH_STDERR"' EXIT
# Fetch PR data
if ! PR_DATA=$(gh pr view "$PR_NUMBER" --json title,body,author,state,headRefName,baseRefName 2>"$GH_STDERR"); then
echo "::error::Failed to fetch PR #${PR_NUMBER} data: $(cat "$GH_STDERR"). Check that the PR exists and the token has 'pull-requests: read' permission."
exit 1
fi
PR_TITLE=$(echo "$PR_DATA" | jq -r '.title // "Untitled"')
PR_BODY=$(echo "$PR_DATA" | jq -r '.body // "No description"')
PR_AUTHOR=$(echo "$PR_DATA" | jq -r '.author.login // "unknown"')
PR_STATE=$(echo "$PR_DATA" | jq -r '.state // "unknown"')
PR_HEAD=$(echo "$PR_DATA" | jq -r '.headRefName // "unknown"')
PR_BASE=$(echo "$PR_DATA" | jq -r '.baseRefName // "unknown"')
# Fetch PR diff stats (first page)
if ! DIFF_STAT=$(gh pr diff "$PR_NUMBER" --name-only 2>"$GH_STDERR"); then
echo "::warning::Failed to fetch PR diff: $(cat "$GH_STDERR")"
DIFF_STAT="[Error fetching diff — see workflow logs]"
fi
# Fetch PR comments (first 20 — older ones omitted)
if ! PR_COMMENTS=$(gh api "repos/$GITHUB_REPOSITORY/issues/$PR_NUMBER/comments?per_page=20" \
--jq '.[] | "<comment author=\"\(.user.login)\">\(.body)</comment>"' 2>"$GH_STDERR"); then
echo "::warning::Failed to fetch PR comments: $(cat "$GH_STDERR")"
PR_COMMENTS="[Error fetching comments — see workflow logs]"
fi
# Fetch PR reviews (first 10)
if ! PR_REVIEWS=$(gh api "repos/$GITHUB_REPOSITORY/pulls/$PR_NUMBER/reviews?per_page=10" \
--jq '.[] | "<review author=\"\(.user.login)\" state=\"\(.state)\">\(.body // "No review body")</review>"' 2>"$GH_STDERR"); then
echo "::warning::Failed to fetch PR reviews: $(cat "$GH_STDERR")"
PR_REVIEWS="[Error fetching reviews — see workflow logs]"
fi
# Fetch review comments / inline code comments (first 30)
if ! REVIEW_COMMENTS=$(gh api "repos/$GITHUB_REPOSITORY/pulls/$PR_NUMBER/comments?per_page=30" \
--jq '.[] | "<review-comment author=\"\(.user.login)\" path=\"\(.path)\" line=\"\(.line // .original_line)\">\(.body)</review-comment>"' 2>"$GH_STDERR"); then
echo "::warning::Failed to fetch review comments: $(cat "$GH_STDERR")"
REVIEW_COMMENTS="[Error fetching review comments — see workflow logs]"
fi
cat > "$PROMPT_FILE" << 'PROMPT_HEADER'
<instructions>
The user has tagged @deepagents in a comment on this pull request. Your task is to resolve their request in the simplest way possible.
You have shell access with git and gh available. The repository is checked out on the PR branch.
Determine whether the comment requires code changes, and if so implement them directly.
- Make only the changes requested. Do not make unrelated changes.
- Do not leave comments in your code about the request or changes you're making.
- Keep changes minimal and focused.
If the comment does not require code changes (e.g. a question), respond by creating a comment on the PR with your answer.
After making changes, commit them to the current branch.
IMPORTANT: When you are finished, you MUST post a brief summary comment on the PR using `gh pr comment`. The comment should:
- Briefly describe what you did (1-3 sentences)
- List any files changed or commits made
- Note if you were unable to complete any part of the request
Always post this summary, even if the task was simple or no code changes were needed.
</instructions>
PROMPT_HEADER
# Write PR context using printf to avoid shell expansion of user-controlled content
{
printf '<pull-request>\n'
printf '<title>%s</title>\n' "$PR_TITLE"
printf '<author>%s</author>\n' "$PR_AUTHOR"
printf '<state>%s</state>\n' "$PR_STATE"
printf '<base>%s</base>\n' "$PR_BASE"
printf '<head>%s</head>\n' "$PR_HEAD"
printf '<body>\n%s\n</body>\n' "$PR_BODY"
printf '</pull-request>\n\n'
printf '<changed-files>\n%s\n</changed-files>\n\n' "$DIFF_STAT"
printf '<pull-request-comments>\n%s\n</pull-request-comments>\n\n' "$PR_COMMENTS"
printf '<pull-request-reviews>\n%s\n</pull-request-reviews>\n\n' "$PR_REVIEWS"
printf '<review-comments>\n%s\n</review-comments>\n\n' "$REVIEW_COMMENTS"
printf '<trigger-comment>\n'
printf 'This is the comment that triggered this workflow. Focus on resolving this request.\n'
printf '<author>%s</author>\n' "$TRIGGER_COMMENT_AUTHOR"
printf '<body>\n%s\n</body>\n' "$TRIGGER_COMMENT_BODY"
printf '</trigger-comment>\n\n'
printf 'Given all of this context, resolve the trigger comment in the simplest way possible.\n'
printf 'IMPORTANT: The trigger comment takes precedence. Focus on what was asked, using the PR context to inform your approach.\n'
} >> "$PROMPT_FILE"
# Set output using heredoc with random delimiter
DELIMITER="PROMPT_$(openssl rand -hex 16)"
{
echo "prompt<<${DELIMITER}"
cat "$PROMPT_FILE"
echo "${DELIMITER}"
} >> "$GITHUB_OUTPUT"
      # Hand the assembled prompt to the reusable Deep Agents action.
      - name: Run Deep Agents
        uses: langchain-ai/deepagents@main
        with:
          # Comment-triggered runs use the prompt assembled from PR context by
          # the build-prompt step; manual workflow_dispatch runs use the prompt
          # typed in by the user. NOTE(review): the `&& … ||` ternary emulation
          # falls through to the dispatch input if build-prompt's output is
          # empty — presumably that cannot happen for comment events; confirm.
          prompt: ${{ github.event_name != 'workflow_dispatch' && steps.build-prompt.outputs.prompt || github.event.inputs.prompt }}
          model: claude-sonnet-4-6
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
          # Or: openai_api_key: ${{ secrets.OPENAI_API_KEY }}
          # Or: google_api_key: ${{ secrets.GOOGLE_API_KEY }}
          skills_repo: langchain-ai/langchain-skills
================================================
FILE: .github/workflows/evals.yml
================================================
# Daily evaluation workflow for Deep Agents
#
# Runs tests/evals on a cron schedule (once per day).
# Single job; model/provider is selected via workflow input `model`.
#
# Required secrets:
# LANGSMITH_API_KEY — used for tracing
# ANTHROPIC_API_KEY — needed for Anthropic models
# OPENAI_API_KEY — needed for OpenAI models
# GOOGLE_API_KEY — needed for Google models
# XAI_API_KEY — needed for xAI/Grok models
# MISTRAL_API_KEY — needed for Mistral models
# DEEPSEEK_API_KEY — needed for DeepSeek models
# GROQ_API_KEY — needed for Groq-hosted models
# OLLAMA_API_KEY — needed for Ollama Cloud models
# OLLAMA_HOST — set to https://ollama.com for cloud inference
# NVIDIA_API_KEY — needed for NVIDIA NIM models
# BASETEN_API_KEY — needed for Baseten-hosted models
# FIREWORKS_API_KEY — needed for Fireworks-hosted models
# OPENROUTER_API_KEY — needed for OpenRouter-hosted models
name: "📊 Evals"
on:
workflow_dispatch:
inputs:
models:
description: "Model set to evaluate. Set definitions: .github/scripts/models.py. Use models_override for individual models."
required: true
default: "all"
type: choice
options:
- all
- set0
- set1
- set2
- open
- "anthropic:claude-haiku-4-5-20251001"
- "anthropic:claude-sonnet-4-20250514"
- "anthropic:claude-sonnet-4-5-20250929"
- "anthropic:claude-sonnet-4-6"
- "anthropic:claude-opus-4-1"
- "anthropic:claude-opus-4-5-20251101"
- "anthropic:claude-opus-4-6"
- "openai:gpt-4o"
- "openai:gpt-4o-mini"
- "openai:gpt-4.1"
- "openai:o3"
- "openai:o4-mini"
- "openai:gpt-5.1-codex"
- "openai:gpt-5.2-codex"
- "openai:gpt-5.4"
- "google_genai:gemini-2.5-flash"
- "google_genai:gemini-2.5-pro"
- "google_genai:gemini-3-flash-preview"
- "google_genai:gemini-3.1-pro-preview"
- "openrouter:minimax/minimax-m2.7"
- "baseten:zai-org/GLM-5"
- "baseten:MiniMaxAI/MiniMax-M2.5"
- "baseten:moonshotai/Kimi-K2.5"
- "baseten:deepseek-ai/DeepSeek-V3.2"
- "baseten:Qwen/Qwen3-Coder-480B-A35B-Instruct"
- "fireworks:fireworks/qwen3-vl-235b-a22b-thinking"
- "fireworks:fireworks/deepseek-v3-0324"
- "fireworks:fireworks/minimax-m2p1"
- "fireworks:fireworks/kimi-k2p5"
- "fireworks:fireworks/glm-5"
- "fireworks:fireworks/minimax-m2p5"
- "ollama:glm-5"
- "ollama:minimax-m2.5"
- "ollama:qwen3.5:397b-cloud"
- "groq:openai/gpt-oss-120b"
- "groq:qwen/qwen3-32b"
- "groq:moonshotai/kimi-k2-instruct"
- "xai:grok-4"
- "xai:grok-3-mini-fast"
- "ollama:nemotron-3-nano:30b"
- "ollama:cogito-2.1:671b"
- "ollama:devstral-2:123b"
- "ollama:ministral-3:14b"
- "ollama:qwen3-next:80b"
- "ollama:qwen3-coder:480b-cloud"
- "ollama:deepseek-v3.2:cloud"
- "nvidia:nvidia/nemotron-3-super-120b-a12b"
models_override:
description: "Custom model list (overrides dropdown). Comma-separated 'provider:model' specs, e.g. 'openai:gpt-4.1,anthropic:claude-sonnet-4-6'. Leave empty to use the preset selection above."
required: false
default: ""
type: string
eval_categories:
description: "Comma-separated eval categories to run (e.g. 'memory,hitl,tool_usage'). Leave empty to run all categories."
required: false
default: ""
type: string
permissions:
contents: write
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-${{ inputs.models_override || inputs.models || 'all' }}
cancel-in-progress: true
env:
UV_NO_SYNC: "true"
UV_FROZEN: "true"
jobs:
prep:
name: "🔧 Prepare matrix"
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- name: "📝 Log dispatch inputs"
continue-on-error: true
env:
MODELS: ${{ inputs.models }}
MODELS_OVERRIDE: ${{ inputs.models_override || '(empty)' }}
RESOLVED: ${{ inputs.models_override || inputs.models || 'all' }}
EVAL_CATEGORIES: ${{ inputs.eval_categories || '(all)' }}
run: |
echo "### 📊 Eval dispatch inputs" >> "$GITHUB_STEP_SUMMARY"
echo "" >> "$GITHUB_STEP_SUMMARY"
echo "| Input | Value |" >> "$GITHUB_STEP_SUMMARY"
echo "|---|---|" >> "$GITHUB_STEP_SUMMARY"
echo "| \`models\` | \`${MODELS}\` |" >> "$GITHUB_STEP_SUMMARY"
echo "| \`models_override\` | \`${MODELS_OVERRIDE}\` |" >> "$GITHUB_STEP_SUMMARY"
echo "| **Resolved**¹ | \`${RESOLVED}\` |" >> "$GITHUB_STEP_SUMMARY"
echo "| \`eval_categories\` (list) | \`${EVAL_CATEGORIES}\` |" >> "$GITHUB_STEP_SUMMARY"
# Build eval_categories as a bullet list
if [ "${EVAL_CATEGORIES}" = "(all)" ]; then
echo "| \`eval_categories\` | (all) |" >> "$GITHUB_STEP_SUMMARY"
else
bullets=""
IFS=',' read -ra cats <<< "${EVAL_CATEGORIES}"
for cat in "${cats[@]}"; do
cat=$(echo "$cat" | xargs)
bullets="${bullets}<li><code>${cat}</code></li>"
done
echo "| \`eval_categories\` | <ul>${bullets}</ul> |" >> "$GITHUB_STEP_SUMMARY"
fi
echo "" >> "$GITHUB_STEP_SUMMARY"
echo "> ¹ **Resolved** = \`models_override\` if set, otherwise \`models\` dropdown, otherwise \`all\`." >> "$GITHUB_STEP_SUMMARY"
- name: "📋 Checkout Code"
uses: actions/checkout@v6
- name: "🐍 Compute eval matrix"
id: set-matrix
run: python .github/scripts/models.py eval
env:
EVAL_MODELS: ${{ inputs.models_override || inputs.models || 'all' }}
eval:
name: "📊 Eval (${{ matrix.model }})"
needs: prep
runs-on: ubuntu-latest
timeout-minutes: 120
strategy:
fail-fast: false
matrix: ${{ fromJson(needs.prep.outputs.matrix) }}
defaults:
run:
working-directory: libs/evals
env:
PYTEST_ADDOPTS: "--model ${{ matrix.model }} --evals-report-file evals_report.json"
LANGSMITH_API_KEY: ${{ secrets.LANGSMITH_API_KEY }}
LANGSMITH_TRACING_V2: "true"
LANGSMITH_EXPERIMENT: ${{ matrix.model }}
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
XAI_API_KEY: ${{ secrets.XAI_API_KEY }}
MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
DEEPSEEK_API_KEY: ${{ secrets.DEEPSEEK_API_KEY }}
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
OLLAMA_API_KEY: ${{ secrets.OLLAMA_API_KEY }}
OLLAMA_HOST: "https://ollama.com"
NVIDIA_API_KEY: ${{ secrets.NVIDIA_API_KEY }}
BASETEN_API_KEY: ${{ secrets.BASETEN_API_KEY }}
FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
steps:
- name: "📋 Checkout Code"
uses: actions/checkout@v6
- name: "🐍 Set up Python + UV"
uses: "./.github/actions/uv_setup"
with:
python-version: "3.12"
cache-suffix: evals
working-directory: libs/evals
- name: "📦 Install Dependencies"
run: uv sync --group test
- name: "🏷️ Apply category filter"
if: inputs.eval_categories != ''
run: |
flags=""
IFS=',' read -ra cats <<< "${{ inputs.eval_categories }}"
for cat in "${cats[@]}"; do
cat=$(echo "$cat" | xargs)
flags="$flags --eval-category $cat"
done
echo "PYTEST_ADDOPTS=${PYTEST_ADDOPTS}${flags}" >> "$GITHUB_ENV"
      # Run the eval suite; the model under test flows in via the job-level
      # PYTEST_ADDOPTS (plus any category flags appended above).
      - name: "📊 Run Evals"
        run: make evals
      # Upload the per-model report even when the eval step failed, so the
      # aggregate job can still summarize partial results.
      - name: "📤 Upload eval report"
        if: always()
        uses: actions/upload-artifact@v7
        with:
          name: evals-report-${{ strategy.job-index }}
          path: libs/evals/evals_report.json
          if-no-files-found: error
aggregate:
name: "📋 Aggregate evals"
runs-on: ubuntu-latest
needs: [eval]
if: always()
steps:
- name: "📋 Checkout Code"
uses: actions/checkout@v6
- name: "📥 Download eval artifacts"
uses: actions/download-artifact@v8
with:
path: evals_artifacts
- name: "🐍 Set up Python + UV"
uses: "./.github/actions/uv_setup"
with:
python-version: "3.12"
cache-suffix: evals-aggregate
working-directory: libs/evals
- name: "🧾 Write summary"
run: uv run --with tabulate python .github/scripts/aggregate_evals.py
- name: "📦 Install evals package"
id: install-evals
if: hashFiles('evals_summary.json') != ''
working-directory: libs/evals
run: uv sync --extra charts
- name: "📊 Generate radar chart"
if: hashFiles('evals_summary.json') != '' && steps.install-evals.outcome == 'success'
working-directory: libs/evals
run: uv run --extra charts python scripts/generate_radar.py --summary ../../evals_summary.json -o ../../charts/radar.png --individual-dir ../../charts/individual --title "Deep Agents Eval Results"
- name: "📤 Upload JSON summary"
uses: actions/upload-artifact@v7
with:
name: evals-summary
path: evals_summary.json
if-no-files-found: warn
- name: "📤 Upload radar charts"
if: hashFiles('charts/radar.png') != ''
uses: actions/upload-artifact@v7
with:
name: radar-charts
path: charts/
      # Push the generated charts to the auto-managed `eval-assets` branch so
      # they can be hot-linked from the run summary via raw.githubusercontent.
      # Requires the workflow's `contents: write` permission.
      - name: "🖼️ Publish charts to eval-assets branch"
        id: publish-charts
        if: hashFiles('charts/radar.png') != ''
        env:
          RUN_ID: ${{ github.run_id }}
          REPO: ${{ github.repository }}
          GITHUB_TOKEN: ${{ github.token }}
        run: |
          set -euo pipefail
          # Charts for each run live under their own runs/<run_id>/ directory.
          asset_dir="runs/${RUN_ID}"
          # Set up a temp workdir so we don't disturb the main checkout.
          tmp="$(mktemp -d)"
          cd "$tmp"
          git init -q
          git remote add origin "https://x-access-token:${GITHUB_TOKEN}@github.com/${REPO}.git"
          # Fetch eval-assets if it exists; otherwise start an orphan branch.
          if git ls-remote --exit-code origin eval-assets >/dev/null 2>&1; then
            # Shallow fetch — history isn't needed just to append one commit.
            git fetch --depth=1 origin eval-assets
            git checkout eval-assets
          else
            git checkout --orphan eval-assets
            git rm -rf . 2>/dev/null || true
            echo "Auto-managed branch for eval chart assets. Do not merge." > README.md
            git add README.md
          fi
          # Copy charts into run-specific directory.
          mkdir -p "${asset_dir}"
          cp "$GITHUB_WORKSPACE/charts/radar.png" "${asset_dir}/radar.png"
          if [ -d "$GITHUB_WORKSPACE/charts/individual" ]; then
            cp -r "$GITHUB_WORKSPACE/charts/individual" "${asset_dir}/individual"
          fi
          git add "${asset_dir}"
          # --allow-empty keeps the step green if a re-run produces
          # byte-identical charts for the same run id.
          git -c user.name="github-actions[bot]" \
              -c user.email="41898282+github-actions[bot]@users.noreply.github.com" \
              commit -m "evals: add charts for run ${RUN_ID}" --allow-empty
          git push origin eval-assets
          # Expose base URL for the summary step.
          base="https://raw.githubusercontent.com/${REPO}/eval-assets/${asset_dir}"
          echo "base_url=${base}" >> "$GITHUB_OUTPUT"
- name: "🖼️ Append charts to summary"
if: steps.publish-charts.outcome == 'success'
env:
BASE_URL: ${{ steps.publish-charts.outputs.base_url }}
run: |
{
echo ""
echo "## Radar charts"
echo ""
echo "### Combined"
echo ""
echo ""
echo ""
if [ -d charts/individual ]; then
echo "### Per-model"
echo ""
for img in charts/individual/*.png; do
name="$(basename "$img" .png)"
echo ""
echo ""
done
fi
} >> "$GITHUB_STEP_SUMMARY"
================================================
FILE: .github/workflows/harbor.yml
================================================
name: "⚓ Harbor"
on:
workflow_dispatch:
inputs:
models:
description: "Model set to run. Set definitions: .github/scripts/models.py. Use models_override for individual models."
required: true
default: "all"
type: choice
options:
- all
- anthropic
- openai
- baseten
- "anthropic:claude-sonnet-4-20250514"
- "anthropic:claude-sonnet-4-5-20250929"
- "anthropic:claude-sonnet-4-6"
- "anthropic:claude-opus-4-1"
- "anthropic:claude-opus-4-5-20251101"
- "anthropic:claude-opus-4-6"
- "openai:gpt-4.1"
- "openai:o3"
- "openai:o4-mini"
- "openai:gpt-5.4"
- "baseten:zai-org/GLM-5"
- "baseten:MiniMaxAI/MiniMax-M2.5"
- "baseten:moonshotai/Kimi-K2.5"
- "baseten:deepseek-ai/DeepSeek-V3.2"
- "baseten:Qwen/Qwen3-Coder-480B-A35B-Instruct"
models_override:
description: "Override: comma-separated models (e.g. 'openai:gpt-4.1,anthropic:claude-sonnet-4-6'). Takes priority over dropdown when non-empty."
required: false
default: ""
type: string
sandbox_env:
description: "Harbor sandbox environment"
required: true
default: "docker"
type: choice
options:
- docker
- daytona
- langsmith
- modal
- runloop
task_count:
description: "Number of Terminal Bench 2 tasks to run"
required: true
default: "1"
type: string
permissions:
contents: read
env:
UV_NO_SYNC: "true"
HARBOR_DATASET_NAME: "terminal-bench"
HARBOR_DATASET_VERSION: "2.0"
jobs:
prep:
name: "🔧 Prepare matrix"
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
env:
LANGSMITH_API_KEY: ${{ secrets.LANGSMITH_API_KEY }}
steps:
- name: "📋 Checkout Code"
uses: actions/checkout@v6
- name: "🐍 Compute Harbor matrix"
id: set-matrix
run: python .github/scripts/models.py harbor
env:
HARBOR_MODELS: ${{ inputs.models_override || inputs.models || 'all' }}
- name: "🐍 Set up Python + UV"
uses: "./.github/actions/uv_setup"
with:
python-version: "3.12"
cache-suffix: harbor-prep
working-directory: libs/evals
- name: "📦 Install Dependencies"
working-directory: libs/evals
run: uv sync --group test --locked
- name: "🧪 Ensure LangSmith dataset"
working-directory: libs/evals
run: uv run python scripts/harbor_langsmith.py ensure-dataset "$HARBOR_DATASET_NAME" --version "$HARBOR_DATASET_VERSION"
harbor:
name: "⚓ Harbor (${{ matrix.model }} / ${{ inputs.sandbox_env }})"
needs: prep
runs-on: ubuntu-latest
timeout-minutes: 360
strategy:
fail-fast: false
matrix: ${{ fromJson(needs.prep.outputs.matrix) }}
defaults:
run:
working-directory: libs/evals
env:
LANGSMITH_API_KEY: ${{ secrets.LANGSMITH_API_KEY }}
LANGSMITH_TRACING_V2: "true"
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
BASETEN_API_KEY: ${{ secrets.BASETEN_API_KEY }}
DAYTONA_API_KEY: ${{ secrets.DAYTONA_API_KEY }}
HARBOR_TASK_COUNT: ${{ inputs.task_count }}
HARBOR_SANDBOX_ENV: ${{ inputs.sandbox_env }}
HARBOR_MODEL: ${{ matrix.model }}
steps:
- name: "📋 Checkout Code"
uses: actions/checkout@v6
- name: "🐍 Set up Python + UV"
uses: "./.github/actions/uv_setup"
with:
python-version: "3.12"
cache-suffix: harbor
working-directory: libs/evals
- name: "📦 Install Dependencies"
run: uv sync --group test --locked
- name: "🧪 Create LangSmith experiment"
id: langsmith
run: |
experiment_name=$(uv run python scripts/harbor_langsmith.py create-experiment "$HARBOR_DATASET_NAME")
echo "experiment_name=$experiment_name" >> "$GITHUB_OUTPUT"
echo "LANGSMITH_EXPERIMENT=$experiment_name" >> "$GITHUB_ENV"
- name: "⚓ Run Harbor"
run: |
uv run harbor run \
--agent-import-path deepagents_harbor:DeepAgentsWrapper \
--dataset "$HARBOR_DATASET_NAME@$HARBOR_DATASET_VERSION" \
-n "$HARBOR_TASK_COUNT" \
--jobs-dir jobs/terminal-bench \
--env "$HARBOR_SANDBOX_ENV" \
--model "$HARBOR_MODEL" \
--agent-kwarg use_cli_agent=false
- name: "🔍 Find latest Harbor job"
id: latest-job
run: |
latest_job=$(python - <<'PY'
from pathlib import Path
jobs_dir = Path("jobs/terminal-bench")
job_dirs = sorted(path for path in jobs_dir.iterdir() if path.is_dir())
if not job_dirs:
raise SystemExit("No Harbor job directory found")
print(job_dirs[-1])
PY
)
echo "job_dir=$latest_job" >> "$GITHUB_OUTPUT"
- name: "⭐ Add Harbor rewards to LangSmith"
if: always() && steps.latest-job.outcome == 'success' && steps.langsmith.outcome == 'success'
env:
HARBOR_JOB_DIR: ${{ steps.latest-job.outputs.job_dir }}
LANGSMITH_EXPERIMENT_NAME: ${{ steps.langsmith.outputs.experiment_name }}
run: |
uv run python scripts/harbor_langsmith.py add-feedback \
"$HARBOR_JOB_DIR" \
--project-name "$LANGSMITH_EXPERIMENT_NAME"
- name: "📝 Write workflow summary"
if: always()
env:
HARBOR_JOB_DIR: ${{ steps.latest-job.outputs.job_dir }}
LANGSMITH_EXPERIMENT_NAME: ${{ steps.langsmith.outputs.experiment_name }}
LATEST_JOB_OUTCOME: ${{ steps.latest-job.outcome }}
run: |
{
echo "## Harbor run"
echo
echo "- Model: $HARBOR_MODEL"
echo "- Dataset: ${HARBOR_DATASET_NAME}@${HARBOR_DATASET_VERSION}"
echo "- Sandbox: ${HARBOR_SANDBOX_ENV}"
echo "- Task count: ${HARBOR_TASK_COUNT}"
echo "- LangSmith experiment: $LANGSMITH_EXPERIMENT_NAME"
if [ "$LATEST_JOB_OUTCOME" = "success" ]; then
echo "- Harbor job dir: $HARBOR_JOB_DIR"
fi
} >> "$GITHUB_STEP_SUMMARY"
- name: "📤 Upload Harbor artifacts"
if: always()
uses: actions/upload-artifact@v7
with:
name: harbor-${{ strategy.job-index }}
path: |
libs/evals/jobs/terminal-bench
if-no-files-found: warn
================================================
FILE: .github/workflows/pr_labeler.yml
================================================
# Unified PR labeler — applies size, file-based, title-based, and
# contributor classification labels in a single sequential workflow.
#
# Consolidates pr_size_labeler.yml, pr_labeler_file.yml,
# pr_labeler_title.yml, and PR-handling from tag-external-issues.yml
# into one workflow to eliminate race conditions from concurrent label
# mutations. tag-external-issues.yml remains active for issue-only
# labeling. Backfill lives in pr_labeler_backfill.yml.
#
# Config and shared logic live in .github/scripts/pr-labeler-config.json
# and .github/scripts/pr-labeler.js — update those when adding partners.
#
# Setup Requirements:
# 1. Create a GitHub App with permissions:
# - Repository: Pull requests (write)
# - Repository: Issues (write)
# - Organization: Members (read)
# 2. Install the app on your organization and this repository
# 3. Add these repository secrets:
# - ORG_MEMBERSHIP_APP_ID: Your app's ID
# - ORG_MEMBERSHIP_APP_PRIVATE_KEY: Your app's private key
#
# The GitHub App token is required to check private organization membership
# and to propagate label events to downstream workflows.
name: "🏷️ PR Labeler"
on:
# Safe since we only check out the base branch, not the PR's code.
# Never check out the PR's head in a pull_request_target job.
pull_request_target:
types: [opened, synchronize, reopened, edited]
permissions:
contents: read
concurrency:
# Separate opened events so external/tier labels are never lost to cancellation
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }}-${{ github.event.action == 'opened' && 'opened' || 'update' }}
cancel-in-progress: ${{ github.event.action != 'opened' }}
jobs:
label:
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: write
issues: write
steps:
# Checks out the base branch (NOT the PR head) so that
# require('./.github/scripts/pr-labeler.js') resolves.
- uses: actions/checkout@v6
- name: Generate GitHub App token
if: github.event.action == 'opened'
id: app-token
uses: actions/create-github-app-token@v3
with:
app-id: ${{ secrets.ORG_MEMBERSHIP_APP_ID }}
private-key: ${{ secrets.ORG_MEMBERSHIP_APP_PRIVATE_KEY }}
- name: Verify App token
if: github.event.action == 'opened'
run: |
if [ -z "${{ steps.app-token.outputs.token }}" ]; then
echo "::error::GitHub App token generation failed — cannot classify contributor"
exit 1
fi
- name: Check org membership
if: github.event.action == 'opened'
id: check-membership
uses: actions/github-script@v8
with:
github-token: ${{ steps.app-token.outputs.token }}
script: |
const { owner, repo } = context.repo;
const { h } = require('./.github/scripts/pr-labeler.js').loadAndInit(github, owner, repo, core);
const author = context.payload.sender.login;
const { isExternal } = await h.checkMembership(
author, context.payload.sender.type,
);
core.setOutput('is-external', isExternal ? 'true' : 'false');
# Rename `deepagents` scope → `sdk` for non-release PRs.
# Release PRs (e.g. `release(deepagents): 1.2.0`) are left
# untouched since their titles are canonical version records.
# Runs before labeling so title-based labels read the
# corrected title.
- name: Rename deepagents scope to sdk
id: rename-scope
uses: actions/github-script@v8
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const pr = context.payload.pull_request;
if (!pr) {
console.log('No pull_request in payload; skipping scope rename');
return;
}
const title = pr.title ?? '';
const match = title.match(/^(\w+!?)\(([^)]+)\)(!?:\s*.*)$/);
if (!match) {
console.log(`Title has no scoped format; skipping rename: "${title}"`);
return;
}
const type = match[1].replace('!', '').toLowerCase();
if (type === 'release') {
console.log(`Skipping release PR: ${title}`);
return;
}
const scopeStr = match[2];
const newScope = scopeStr
.split(',')
.map(s => s.trim() === 'deepagents' ? 'sdk' : s.trim())
.join(',');
if (newScope === scopeStr) return;
const newTitle = `${match[1]}(${newScope})${match[3]}`;
console.log(`Renaming: "${title}" → "${newTitle}"`);
try {
await github.rest.pulls.update({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: pr.number,
title: newTitle,
});
} catch (error) {
core.warning(
`Failed to rename PR #${pr.number} title ` +
`(${error.status ?? 'unknown'}): ${error.message}. ` +
`Labeling will continue with the original title.`
);
return;
}
// Pass corrected title to the labeling step via output;
// context.payload.pull_request.title is frozen from the
// webhook event and won't reflect the API update.
core.setOutput('title', newTitle);
- name: Apply PR labels
uses: actions/github-script@v8
env:
IS_EXTERNAL: ${{ steps.check-membership.outputs.is-external }}
RENAMED_TITLE: ${{ steps.rename-scope.outputs.title }}
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const { owner, repo } = context.repo;
const { h } = require('./.github/scripts/pr-labeler.js').loadAndInit(github, owner, repo, core);
const pr = context.payload.pull_request;
if (!pr) return;
const prNumber = pr.number;
const action = context.payload.action;
const toAdd = new Set();
const toRemove = new Set();
const currentLabels = (await github.paginate(
github.rest.issues.listLabelsOnIssue,
{ owner, repo, issue_number: prNumber, per_page: 100 },
)).map(l => l.name ?? '');
// ── Size + file labels (skip on 'edited' — files unchanged) ──
if (action !== 'edited') {
for (const sl of h.sizeLabels) await h.ensureLabel(sl);
const files = await github.paginate(github.rest.pulls.listFiles, {
owner, repo, pull_number: prNumber, per_page: 100,
});
const { totalChanged, sizeLabel } = h.computeSize(files);
toAdd.add(sizeLabel);
for (const sl of h.sizeLabels) {
if (currentLabels.includes(sl) && sl !== sizeLabel) toRemove.add(sl);
}
console.log(`Size: ${totalChanged} changed lines → ${sizeLabel}`);
for (const label of h.matchFileLabels(files)) {
toAdd.add(label);
}
}
// ── Title-based labels ──
// Use renamed title if the scope-rename step rewrote it,
// since pr.title still reflects the pre-update value.
const title = process.env.RENAMED_TITLE || pr.title || '';
const { labels: titleLabels, typeLabel } = h.matchTitleLabels(title);
for (const label of titleLabels) {
toAdd.add(label);
}
// Remove stale type labels only when a type was detected
if (typeLabel) {
for (const tl of h.allTypeLabels) {
if (currentLabels.includes(tl) && !titleLabels.has(tl)) toRemove.add(tl);
}
}
// ── Internal label (only on open, non-external contributors) ──
// IS_EXTERNAL is empty string on non-opened events (step didn't
// run), so this guard is only true for opened + internal.
if (action === 'opened' && process.env.IS_EXTERNAL === 'false') {
toAdd.add('internal');
}
// ── Apply changes ──
// Ensure all labels we're about to add exist (addLabels returns
// 422 if any label in the batch is missing, which would prevent
// ALL labels from being applied).
for (const name of toAdd) {
await h.ensureLabel(name);
}
for (const name of toRemove) {
if (toAdd.has(name)) continue;
try {
await github.rest.issues.removeLabel({
owner, repo, issue_number: prNumber, name,
});
} catch (e) {
if (e.status !== 404) throw e;
}
}
const addList = [...toAdd];
if (addList.length > 0) {
await github.rest.issues.addLabels({
owner, repo, issue_number: prNumber, labels: addList,
});
}
const removed = [...toRemove].filter(r => !toAdd.has(r));
console.log(`PR #${prNumber}: +[${addList.join(', ')}] -[${removed.join(', ')}]`);
- name: Apply contributor tier label
if: github.event.action == 'opened' && steps.check-membership.outputs.is-external == 'true'
uses: actions/github-script@v8
with:
github-token: ${{ steps.app-token.outputs.token }}
script: |
const { owner, repo } = context.repo;
const { h } = require('./.github/scripts/pr-labeler.js').loadAndInit(github, owner, repo, core);
const pr = context.payload.pull_request;
await h.applyTierLabel(pr.number, pr.user.login);
- name: Add external label
if: github.event.action == 'opened' && steps.check-membership.outputs.is-external == 'true'
uses: actions/github-script@v8
with:
# Use App token so the "labeled" event propagates to downstream
# workflows (e.g. require_issue_link.yml). Events created by the
# default GITHUB_TOKEN do not trigger additional workflow runs.
github-token: ${{ steps.app-token.outputs.token }}
script: |
const { owner, repo } = context.repo;
const { h } = require('./.github/scripts/pr-labeler.js').loadAndInit(github, owner, repo, core);
const prNumber = context.payload.pull_request.number;
await h.ensureLabel('external');
await github.rest.issues.addLabels({
owner, repo,
issue_number: prNumber,
labels: ['external'],
});
console.log(`Added 'external' label to PR #${prNumber}`);
================================================
FILE: .github/workflows/pr_labeler_backfill.yml
================================================
# Backfill PR labels on all open PRs.
#
# Manual-only workflow that applies the same labels as pr_labeler.yml
# (size, file, title, contributor classification) to existing open PRs.
# Reuses shared logic from .github/scripts/pr-labeler.js.
name: "🏷️ PR Labeler Backfill"
on:
workflow_dispatch:
inputs:
max_items:
description: "Maximum number of open PRs to process"
default: "100"
type: string
permissions:
contents: read
jobs:
backfill:
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: write
issues: write
steps:
- uses: actions/checkout@v6
- name: Generate GitHub App token
id: app-token
uses: actions/create-github-app-token@v3
with:
app-id: ${{ secrets.ORG_MEMBERSHIP_APP_ID }}
private-key: ${{ secrets.ORG_MEMBERSHIP_APP_PRIVATE_KEY }}
- name: Backfill labels on open PRs
uses: actions/github-script@v8
with:
github-token: ${{ steps.app-token.outputs.token }}
script: |
const { owner, repo } = context.repo;
const rawMax = '${{ inputs.max_items }}';
const maxItems = parseInt(rawMax, 10);
if (isNaN(maxItems) || maxItems <= 0) {
core.setFailed(`Invalid max_items: "${rawMax}" — must be a positive integer`);
return;
}
const { config, h } = require('./.github/scripts/pr-labeler.js').loadAndInit(github, owner, repo, core);
for (const name of [...h.sizeLabels, ...h.tierLabels]) {
await h.ensureLabel(name);
}
const contributorCache = new Map();
const fileRules = h.buildFileRules();
const prs = await github.paginate(github.rest.pulls.list, {
owner, repo, state: 'open', per_page: 100,
});
let processed = 0;
let failures = 0;
for (const pr of prs) {
if (processed >= maxItems) break;
try {
const author = pr.user.login;
const info = await h.getContributorInfo(contributorCache, author, pr.user.type);
const labels = new Set();
labels.add(info.isExternal ? 'external' : 'internal');
if (info.isExternal && info.mergedCount != null && info.mergedCount >= config.trustedThreshold) {
labels.add('trusted-contributor');
} else if (info.isExternal && info.mergedCount === 0) {
labels.add('new-contributor');
}
// Size + file labels
const files = await github.paginate(github.rest.pulls.listFiles, {
owner, repo, pull_number: pr.number, per_page: 100,
});
const { sizeLabel } = h.computeSize(files);
labels.add(sizeLabel);
for (const label of h.matchFileLabels(files, fileRules)) {
labels.add(label);
}
// Title labels
const { labels: titleLabels } = h.matchTitleLabels(pr.title ?? '');
for (const tl of titleLabels) labels.add(tl);
// Ensure all labels exist before batch add
for (const name of labels) {
await h.ensureLabel(name);
}
// Remove stale managed labels
const currentLabels = (await github.paginate(
github.rest.issues.listLabelsOnIssue,
{ owner, repo, issue_number: pr.number, per_page: 100 },
)).map(l => l.name ?? '');
const managed = [...h.sizeLabels, ...h.tierLabels, ...h.allTypeLabels];
for (const name of currentLabels) {
if (managed.includes(name) && !labels.has(name)) {
try {
await github.rest.issues.removeLabel({
owner, repo, issue_number: pr.number, name,
});
} catch (e) {
if (e.status !== 404) throw e;
}
}
}
await github.rest.issues.addLabels({
owner, repo, issue_number: pr.number, labels: [...labels],
});
console.log(`PR #${pr.number} (${author}): ${[...labels].join(', ')}`);
processed++;
} catch (e) {
failures++;
core.warning(`Failed to process PR #${pr.number}: ${e.message}`);
}
}
console.log(`\nBackfill complete. Processed ${processed} PRs, ${failures} failures. ${contributorCache.size} unique authors.`);
================================================
FILE: .github/workflows/pr_lint.yml
================================================
# PR title linting.
#
# FORMAT (Conventional Commits 1.0.0):
#
#   <type>[optional scope]: <description>
#   [optional body]
#   [optional footer(s)]
#
# Examples:
#   feat(sdk): add multi-agent support
#   fix(cli): resolve flag parsing error
#   docs: update API usage examples
#
# Allowed Types:
#   * feat — a new feature (MINOR)
#   * fix — a bug fix (PATCH)
#   * docs — documentation only changes
#   * style — formatting, linting, etc.; no code change or typing refactors
#   * refactor — code change that neither fixes a bug nor adds a feature
#   * perf — code change that improves performance
#   * test — adding tests or correcting existing
#   * build — changes that affect the build system/external dependencies
#   * ci — continuous integration/configuration changes
#   * chore — other changes that don't modify source or test files
#   * revert — reverts a previous commit
#   * release — prepare a new release
#   * hotfix — urgent fix that won't trigger a release
#
# Allowed Scope(s) (optional) — keep in sync with the `scopes:` list below:
#   acp, ci, cli, cli-gha, daytona, deepagents, deepagents-cli, deps, evals,
#   examples, harbor, infra, quickjs, sdk
#
# Multiple scopes can be used by separating them with a comma.
#
# Rules:
#   1. The 'Type' must start with a lowercase letter.
#   2. Breaking changes: append "!" after type/scope (e.g., feat!: drop x support)
#   3. When releasing (updating the pyproject.toml and uv.lock), the commit message
#      should be: `release(scope): x.y.z` (e.g., `release(deepagents): 1.2.0` with no
#      body, footer, or preceding/following text).
#
# Enforces Conventional Commits format for pull request titles to maintain a clear and
# machine-readable change history.
name: "🏷️ PR Title Lint"

permissions:
  pull-requests: read

on:
  pull_request:
    types: [opened, edited, synchronize]

jobs:
  # Validates that PR title follows Conventional Commits 1.0.0 specification
  lint-pr-title:
    name: "validate format"
    runs-on: ubuntu-latest
    steps:
      # Explicitly reject titles like "fix(): ..." (empty scope parentheses)
      # before handing the title to the main validator action below.
      - name: "🚫 Reject empty scope"
        env:
          PR_TITLE: ${{ github.event.pull_request.title }}
        run: |
          if [[ "$PR_TITLE" =~ ^[a-z]+\(\)[!]?: ]]; then
            echo "::error::PR title has empty scope parentheses: '$PR_TITLE'"
            echo "Either remove the parentheses or provide a scope (e.g., 'fix(cli): ...')."
            exit 1
          fi
      - name: "✅ Validate Conventional Commits Format"
        uses: amannn/action-semantic-pull-request@v6
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          types: |
            feat
            fix
            docs
            style
            refactor
            perf
            test
            build
            ci
            chore
            revert
            release
            hotfix
          scopes: |
            acp
            ci
            cli
            cli-gha
            daytona
            deepagents
            deepagents-cli
            deps
            evals
            examples
            harbor
            infra
            quickjs
            sdk
          # Scope is optional, but when present it must match the list above.
          requireScope: false
          # Disallow "release" as a scope (it is a type, not a scope) and any
          # all-uppercase scope.
          disallowScopes: |
            release
            [A-Z]+
          # PRs carrying this label bypass title linting entirely.
          ignoreLabels: |
            ignore-lint-pr-title
================================================
FILE: .github/workflows/release-please.yml
================================================
# Creates release PRs based on conventional commits.
#
# When commits land on main, release-please analyzes them and either:
# - Creates/updates a release PR with changelog and version bump
# - When a release PR is merged, triggers the release workflow
#
# GitHub releases are created by release.yml after all checks pass,
# not by release-please directly (skip-github-release: true in config).
name: Release Please (CLI ONLY)

on:
  push:
    branches:
      - main

jobs:
  release-please:
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write
    outputs:
      # 'true' when the pushed commit is a merged CLI release PR.
      cli-release: ${{ steps.check-cli-release.outputs.is-release }}
      # JSON describing the release PR created/updated by release-please
      # (empty string when no PR was touched on this run).
      pr: ${{ steps.release.outputs.pr }}
    steps:
      - uses: googleapis/release-please-action@v4
        id: release
        with:
          config-file: release-please-config.json
          manifest-file: .release-please-manifest.json
      # Detect CLI release by checking if this commit updated the CLI's CHANGELOG.md
      # release-please ALWAYS updates CHANGELOG.md when merging a release PR
      - uses: actions/checkout@v6
        with:
          # Depth 2 so HEAD~1 exists for the diff below.
          fetch-depth: 2
      - name: Check if CLI release PR was merged
        id: check-cli-release
        run: |
          if git diff --name-only HEAD~1 HEAD | grep -q "^libs/cli/CHANGELOG.md$"; then
            echo "is-release=true" >> $GITHUB_OUTPUT
            echo "CLI CHANGELOG.md was modified - this is a release commit"
          else
            echo "is-release=false" >> $GITHUB_OUTPUT
          fi

  # Update uv.lock files when release-please creates/updates a PR
  # release-please updates pyproject.toml versions but doesn't regenerate lockfiles
  # https://github.com/googleapis/release-please/issues/2561
  update-lockfiles:
    needs: release-please
    if: needs.release-please.outputs.pr != ''
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - name: Checkout release branch
        uses: actions/checkout@v6
        with:
          # Work directly on the release PR's branch so the lockfile
          # commit lands on the release PR itself.
          ref: ${{ fromJson(needs.release-please.outputs.pr).headBranchName }}
      - name: Setup uv
        uses: astral-sh/setup-uv@v7
      - name: Update lockfiles
        run: |
          # Re-lock every package directory that has a uv.lock.
          for dir in $(find . -name "uv.lock" -type f -exec dirname {} \;); do
            echo "Updating $dir"
            # libs/acp locks against Python 3.14; every other package uses 3.12.
            if [ "$dir" = "./libs/acp" ]; then
              uv lock --directory "$dir" --python 3.14
            else
              uv lock --directory "$dir" --python 3.12
            fi
          done
      - name: Commit and push
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
          git add "*/uv.lock"
          # Only commit when a lockfile actually changed.
          if git diff --staged --quiet; then
            echo "No lockfile changes to commit"
          else
            git commit -m "chore: update lockfiles"
            git push
          fi

  # Trigger release workflow when CLI release PR is merged
  # GitHub release is created by release.yml AFTER all checks pass
  release-deepagents-cli:
    needs: release-please
    if: needs.release-please.outputs.cli-release == 'true'
    uses: ./.github/workflows/release.yml
    with:
      package: deepagents-cli
    permissions:
      contents: write
      id-token: write
      # write needed to update PR label from "autorelease: pending" to "autorelease: tagged"
      pull-requests: write
================================================
FILE: .github/workflows/release.yml
================================================
# Builds and publishes deepagents packages to PyPI.
#
# Triggers:
# - Automatically via workflow_call from release-please.yml when a release PR is merged
# - Manually via workflow_dispatch
#
# Flow: build -> pre-release-checks -> test-pypi -> publish -> release
name: "🚀 Package Release"
run-name: "Release ${{ inputs.package }}"

on:
  # Note: `inputs.package` resolves for both trigger types; the dispatch-only
  # "dangerous-*" inputs are simply absent (falsy) on workflow_call.
  workflow_call:
    inputs:
      package:
        required: true
        type: string
        description: "Package to release"
  workflow_dispatch:
    inputs:
      package:
        required: true
        type: choice
        description: "Package to release (⚠️ For deepagents-cli, use release-please by default; manual dispatch is exception-only for recovery/hotfix scenarios — see .github/RELEASING.md)"
        options:
          - deepagents
          - deepagents-cli
          - deepagents-acp
          - deepagents-evals
          - langchain-daytona
          - langchain-modal
          - langchain-quickjs
          - langchain-runloop
        default: deepagents
      dangerous-nonmain-release:
        required: false
        type: boolean
        default: false
        description: "Release from a non-main branch (danger!) - Only use for hotfixes"
      dangerous-skip-sdk-pin-check:
        required: false
        type: boolean
        default: false
        description: "Skip CLI SDK pin validation (danger!) - Only use when intentionally pinning an older SDK"

env:
  PYTHON_VERSION: "3.11"
  UV_NO_SYNC: "true"
  UV_FROZEN: "true"

permissions:
  contents: write # Required for creating GitHub releases

jobs:
  # Determine working directory from package input
  setup:
    runs-on: ubuntu-latest
    outputs:
      package: ${{ steps.parse.outputs.package }}
      working-dir: ${{ steps.parse.outputs.working-dir }}
    steps:
      - name: Parse package input
        id: parse
        run: |
          PACKAGE="${{ inputs.package }}"
          echo "package=$PACKAGE" >> $GITHUB_OUTPUT
          # Map package name to working directory
          case "$PACKAGE" in
            deepagents)
              echo "working-dir=libs/deepagents" >> $GITHUB_OUTPUT
              ;;
            deepagents-cli)
              echo "working-dir=libs/cli" >> $GITHUB_OUTPUT
              ;;
            deepagents-acp)
              echo "working-dir=libs/acp" >> $GITHUB_OUTPUT
              ;;
            deepagents-evals)
              echo "working-dir=libs/evals" >> $GITHUB_OUTPUT
              ;;
            langchain-daytona)
              echo "working-dir=libs/partners/daytona" >> $GITHUB_OUTPUT
              ;;
            langchain-modal)
              echo "working-dir=libs/partners/modal" >> $GITHUB_OUTPUT
              ;;
            langchain-quickjs)
              echo "working-dir=libs/partners/quickjs" >> $GITHUB_OUTPUT
              ;;
            langchain-runloop)
              echo "working-dir=libs/partners/runloop" >> $GITHUB_OUTPUT
              ;;
            *)
              # Fail fast on a typo'd package name (workflow_call has no choice list).
              echo "Error: Unknown package '$PACKAGE'"
              echo "Valid packages are: deepagents, deepagents-cli, deepagents-acp, deepagents-evals, langchain-daytona, langchain-modal, langchain-quickjs, langchain-runloop"
              exit 1
              ;;
          esac
  # Build the distribution package and extract version info
  # Runs in isolated environment with minimal permissions for security
  build:
    needs: setup
    if: github.ref == 'refs/heads/main' || inputs.dangerous-nonmain-release
    runs-on: ubuntu-latest
    permissions:
      contents: read
    env:
      WORKING_DIR: ${{ needs.setup.outputs.working-dir }}
    outputs:
      pkg-name: ${{ steps.check-version.outputs.pkg-name }}
      version: ${{ steps.check-version.outputs.version }}
    steps:
      - uses: actions/checkout@v6
      - name: Set up Python + uv
        uses: "./.github/actions/uv_setup"
        with:
          python-version: ${{ env.PYTHON_VERSION }}
      # We want to keep this build stage *separate* from the release stage,
      # so that there's no sharing of permissions between them.
      # (Release stage has trusted publishing and GitHub repo contents write access.)
      #
      # Otherwise, a malicious `build` step (e.g. via a compromised dependency)
      # could get access to our GitHub or PyPI credentials.
      #
      # Per the trusted publishing GitHub Action:
      # > It is strongly advised to separate jobs for building [...]
      # > from the publish job.
      # https://github.com/pypa/gh-action-pypi-publish#non-goals
      - name: Build project for distribution
        run: uv build
        working-directory: ${{ env.WORKING_DIR }}
      - name: Upload build
        uses: actions/upload-artifact@v7
        with:
          name: dist
          path: ${{ env.WORKING_DIR }}/dist/
      # Read the package name and version straight from pyproject.toml so the
      # downstream release tag matches exactly what was built.
      - name: Check version
        id: check-version
        shell: python
        working-directory: ${{ env.WORKING_DIR }}
        run: |
          import os
          import tomllib
          # Parse [project] name/version from the package's pyproject.toml.
          with open("pyproject.toml", "rb") as f:
              data = tomllib.load(f)
          pkg_name = data["project"]["name"]
          version = data["project"]["version"]
          # Expose both as step outputs for later jobs.
          with open(os.environ["GITHUB_OUTPUT"], "a") as f:
              f.write(f"pkg-name={pkg_name}\n")
              f.write(f"version={version}\n")
  # Generate release notes from CHANGELOG.md (with git log fallback)
  # and collect contributor shoutouts from merged PRs
  release-notes:
    needs:
      - setup
      - build
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: read
    env:
      WORKING_DIR: ${{ needs.setup.outputs.working-dir }}
    outputs:
      release-body: ${{ steps.generate-release-body.outputs.release-body }}
      tag: ${{ steps.check-tags.outputs.tag }}
    steps:
      - uses: actions/checkout@v6
        with:
          # Full history: the steps below walk tags and commit ranges.
          fetch-depth: 0
      - name: Check tags
        id: check-tags
        shell: bash
        working-directory: ${{ env.WORKING_DIR }}
        env:
          PKG_NAME: ${{ needs.build.outputs.pkg-name }}
          VERSION: ${{ needs.build.outputs.version }}
        run: |
          # Release tags follow the "<package>==<version>" convention.
          TAG="${PKG_NAME}==${VERSION}"
          echo tag="$TAG" >> $GITHUB_OUTPUT
      - name: Resolve previous tag and release commit
        id: resolve-refs
        env:
          PKG_NAME: ${{ needs.build.outputs.pkg-name }}
          VERSION: ${{ needs.build.outputs.version }}
        run: |
          # Determine previous tag
          if [[ "$VERSION" == *"-"* ]]; then
            # Pre-release (version contains "-"): prefer the stable tag for the
            # same base version; otherwise the most recent stable tag.
            BASE_VERSION=${VERSION%%-*}
            PREV_TAG=$(git tag --sort=-creatordate | (grep -E "^${PKG_NAME}==${BASE_VERSION}$" || true) | head -1)
            if [ -z "$PREV_TAG" ]; then
              PREV_TAG=$(git tag --sort=-creatordate | (grep -E "^${PKG_NAME}==[0-9]+\.[0-9]+\.[0-9]+$" || true) | head -1)
            fi
          else
            # Stable release: guess "same major.minor, patch - 1".
            # A ".0" patch has no such predecessor, so fall back to the most
            # recent stable tag for the package.
            PREV_TAG="$PKG_NAME==${VERSION%.*}.$(( ${VERSION##*.} - 1 ))"
            [[ "${VERSION##*.}" -eq 0 ]] && PREV_TAG=""
            if [ -z "$PREV_TAG" ]; then
              PREV_TAG=$(git tag --sort=-creatordate | (grep -E "^${PKG_NAME}==[0-9]+\.[0-9]+\.[0-9]+$" || true) | head -1)
            fi
          fi
          # Validate prev tag exists
          if [ -n "$PREV_TAG" ] && [ "$PREV_TAG" != "$PKG_NAME==0.0.0" ]; then
            GIT_TAG_RESULT=$(git tag -l "$PREV_TAG")
            [ -z "$GIT_TAG_RESULT" ] && PREV_TAG=""
          else
            PREV_TAG=""
          fi
          echo "Previous tag: $PREV_TAG"
          echo "prev-tag=$PREV_TAG" >> "$GITHUB_OUTPUT"
          # Resolve the actual release commit instead of using HEAD.
          # release-please always updates CHANGELOG.md in the release commit,
          # so on workflow_call this resolves to HEAD (effectively a no-op).
          # On workflow_dispatch (manual/recovery), HEAD may be ahead of the
          # release commit — this avoids attributing post-release commits to
          # this release's contributor list.
          RELEASE_COMMIT=$(git log -1 --format=%H -- "$WORKING_DIR/CHANGELOG.md")
          if [ -z "$RELEASE_COMMIT" ]; then
            echo "Warning: no CHANGELOG.md history found, falling back to HEAD"
            RELEASE_COMMIT=$(git rev-parse HEAD)
          fi
          echo "Release commit (from CHANGELOG.md): $RELEASE_COMMIT"
          echo "release-commit=$RELEASE_COMMIT" >> "$GITHUB_OUTPUT"
      - name: Generate release body
        id: generate-release-body
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          PKG_NAME: ${{ needs.build.outputs.pkg-name }}
          VERSION: ${{ needs.build.outputs.version }}
          PREV_TAG: ${{ steps.resolve-refs.outputs.prev-tag }}
          RELEASE_COMMIT: ${{ steps.resolve-refs.outputs.release-commit }}
        run: |
          if [ -z "$RELEASE_COMMIT" ]; then
            echo "::error::RELEASE_COMMIT is empty — resolve-refs step may have failed"
            exit 1
          fi
          CHANGELOG_PATH="$WORKING_DIR/CHANGELOG.md"
          RELEASE_BODY=""
          # Try to extract current version's section from CHANGELOG.md
          if [ -f "$CHANGELOG_PATH" ]; then
            echo "Found CHANGELOG.md, extracting version $VERSION section..."
            # Extract section between current version header and next version header (or EOF)
            # Matches headers like: ## [0.0.16] or ## 0.0.16
            RELEASE_BODY=$(awk -v ver="$VERSION" '
              BEGIN { found=0; printing=0 }
              /^## \[?[0-9]+\.[0-9]+\.[0-9]+/ {
                if (printing) { exit }
                if (index($0, ver)) { found=1; printing=1; next }
              }
              printing { print }
            ' "$CHANGELOG_PATH")
            if [ -n "$RELEASE_BODY" ]; then
              echo "Successfully extracted changelog for version $VERSION"
            else
              echo "Could not find version $VERSION in CHANGELOG.md"
            fi
          else
            echo "No CHANGELOG.md found at $CHANGELOG_PATH"
          fi
          # Fallback to git log if CHANGELOG extraction failed
          if [ -z "$RELEASE_BODY" ]; then
            echo "Falling back to git log for release notes..."
            FALLBACK_PREV="$PREV_TAG"
            if [ -z "$FALLBACK_PREV" ]; then
              PREAMBLE="Initial release"
              # No previous tag: log from the repository's root commit.
              FALLBACK_PREV=$(git rev-list --max-parents=0 "$RELEASE_COMMIT")
            else
              PREAMBLE="Changes since $FALLBACK_PREV"
            fi
            GIT_LOG=$(git log --format="%s" "$FALLBACK_PREV".."$RELEASE_COMMIT" -- "$WORKING_DIR")
            RELEASE_BODY=$(printf "%s\n%s" "$PREAMBLE" "$GIT_LOG")
          fi
          # ── Collect contributors from merged PRs ──
          # Get commits between previous tag and release commit for this package
          # (capped at 100 commits to bound API calls).
          if [ -z "$PREV_TAG" ]; then
            COMMITS=$(git rev-list "$RELEASE_COMMIT" -- "$WORKING_DIR" | head -100)
          else
            COMMITS=$(git rev-list "$PREV_TAG".."$RELEASE_COMMIT" -- "$WORKING_DIR" | head -100)
          fi
          # Find PRs and collect contributors (GitHub username + optional Twitter/LinkedIn)
          declare -A TWITTER_HANDLES # Map: github_username -> twitter_handle (or empty)
          declare -A LINKEDIN_URLS # Map: github_username -> linkedin_url (or empty)
          # Colon-delimited list of PR numbers already handled (":12::34:" form).
          SEEN_PRS=""
          for sha in $COMMITS; do
            # Get PR number for this commit (if merged via PR)
            PR_NUM=$(gh api "/repos/${{ github.repository }}/commits/$sha/pulls" \
              --jq '.[0].number // empty' 2>/dev/null || true)
            if [ -n "$PR_NUM" ] && [[ ! "$SEEN_PRS" =~ ":$PR_NUM:" ]]; then
              SEEN_PRS="$SEEN_PRS:$PR_NUM:"
              # Get PR author, body, and labels
              PR_DATA=$(gh pr view "$PR_NUM" --json author,body,labels 2>/dev/null || true)
              if [ -n "$PR_DATA" ]; then
                GH_USER=$(echo "$PR_DATA" | jq -r '.author.login // empty')
                PR_BODY=$(echo "$PR_DATA" | jq -r '.body // empty')
                # Skip bots and automated accounts
                IS_BOT=$(echo "$PR_DATA" | jq -r '.author.is_bot // false')
                if [ "$IS_BOT" = "true" ]; then
                  echo "Skipping bot account: $GH_USER (PR #$PR_NUM)"
                  continue
                fi
                # Skip internal contributors (PRs labeled "internal" by tag-external-contributions workflow)
                IS_INTERNAL=$(echo "$PR_DATA" | jq -r '.labels[].name // empty' | grep -qx "internal" && echo "true" || echo "false")
                if [ "$IS_INTERNAL" = "true" ]; then
                  echo "Skipping internal contributor: $GH_USER (PR #$PR_NUM)"
                  continue
                fi
                if [ -n "$GH_USER" ]; then
                  # Extract Twitter handle if present (matches "Twitter: @handle" or "Twitter: handle")
                  TWITTER=$(echo "$PR_BODY" | grep -iE '^\s*Twitter:\s' | sed -nE 's/.*:[[:space:]]*@?[[:space:]]*([a-zA-Z0-9_]+).*/\1/p' | head -1 || true)
                  # Extract LinkedIn URL if present (matches "LinkedIn: https://linkedin.com/in/username" or similar)
                  LINKEDIN=$(echo "$PR_BODY" | grep -iE '^\s*LinkedIn:\s' | grep -oE '(https?://)?(www\.)?linkedin\.com/in/[a-zA-Z0-9_-]+/?' | head -1 || true)
                  # Add user if not seen, or update socials if newly provided
                  if [ -z "${TWITTER_HANDLES[$GH_USER]+x}" ]; then
                    TWITTER_HANDLES[$GH_USER]="$TWITTER"
                    LINKEDIN_URLS[$GH_USER]="$LINKEDIN"
                  else
                    [ -n "$TWITTER" ] && [ -z "${TWITTER_HANDLES[$GH_USER]}" ] && TWITTER_HANDLES[$GH_USER]="$TWITTER"
                    [ -n "$LINKEDIN" ] && [ -z "${LINKEDIN_URLS[$GH_USER]}" ] && LINKEDIN_URLS[$GH_USER]="$LINKEDIN"
                  fi
                fi
              fi
            fi
          done
          # Build contributor list: @ghuser ([Twitter](url), [LinkedIn](url)) or just @ghuser
          CONTRIBUTOR_LIST=""
          for GH_USER in "${!TWITTER_HANDLES[@]}"; do
            TWITTER="${TWITTER_HANDLES[$GH_USER]}"
            LINKEDIN="${LINKEDIN_URLS[$GH_USER]}"
            # Build social links
            SOCIALS=""
            if [ -n "$TWITTER" ]; then
              SOCIALS="[Twitter](https://x.com/$TWITTER)"
            fi
            if [ -n "$LINKEDIN" ]; then
              # Ensure LinkedIn URL has https:// prefix
              if [[ ! "$LINKEDIN" =~ ^https?:// ]]; then
                LINKEDIN="https://$LINKEDIN"
              fi
              if [ -n "$SOCIALS" ]; then
                SOCIALS="$SOCIALS, [LinkedIn]($LINKEDIN)"
              else
                SOCIALS="[LinkedIn]($LINKEDIN)"
              fi
            fi
            if [ -n "$SOCIALS" ]; then
              ENTRY="@$GH_USER ($SOCIALS)"
            else
              ENTRY="@$GH_USER"
            fi
            if [ -z "$CONTRIBUTOR_LIST" ]; then
              CONTRIBUTOR_LIST="$ENTRY"
            else
              CONTRIBUTOR_LIST="$CONTRIBUTOR_LIST, $ENTRY"
            fi
          done
          echo "Found contributors: $CONTRIBUTOR_LIST"
          # Append contributor shoutouts
          if [ -n "$CONTRIBUTOR_LIST" ]; then
            RELEASE_BODY=$(printf "%s\n\n---\n\nThanks to our community contributors: %s" "$RELEASE_BODY" "$CONTRIBUTOR_LIST")
          fi
          # Output release body using heredoc for proper multiline handling
          {
            echo 'release-body<<EOF'
            echo "$RELEASE_BODY"
            echo EOF
          } >> "$GITHUB_OUTPUT"
  # Dry-run publish to test PyPI before touching the real index.
  test-pypi-publish:
    needs:
      - setup
      - build
      - pre-release-checks
    runs-on: ubuntu-latest
    permissions:
      # This permission is used for trusted publishing:
      # https://blog.pypi.org/posts/2023-04-20-introducing-trusted-publishers/
      #
      # Trusted publishing has to also be configured on PyPI for each package:
      # https://docs.pypi.org/trusted-publishers/adding-a-publisher/
      id-token: write
    env:
      WORKING_DIR: ${{ needs.setup.outputs.working-dir }}
    steps:
      - uses: actions/checkout@v6
      - uses: actions/download-artifact@v8
        with:
          name: dist
          path: ${{ env.WORKING_DIR }}/dist/
      - name: Publish to test PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          packages-dir: ${{ env.WORKING_DIR }}/dist/
          verbose: true
          print-hash: true
          repository-url: https://test.pypi.org/legacy/
          # We overwrite any existing distributions with the same name and version.
          # This is *only for CI use* and is *extremely dangerous* otherwise!
          # https://github.com/pypa/gh-action-pypi-publish#tolerating-release-package-file-duplicates
          skip-existing: true
          # Temp workaround since attestations are on by default as of gh-action-pypi-publish v1.11.0
          attestations: false
  # Install the built wheel in a clean venv, verify it imports, and run the
  # package's unit tests against the built distribution (not the source tree).
  pre-release-checks:
    needs:
      - setup
      - build
    runs-on: ubuntu-latest
    permissions:
      contents: read
    timeout-minutes: 20
    env:
      WORKING_DIR: ${{ needs.setup.outputs.working-dir }}
    steps:
      - uses: actions/checkout@v6
      # We explicitly *don't* set up caching here. This ensures our tests are
      # maximally sensitive to catching breakage.
      #
      # For example, here's a way that caching can cause a falsely-passing test:
      # - Make the package manifest no longer list a dependency package
      #   as a requirement. This means it won't be installed by `pip install`,
      #   and attempting to use it would cause a crash.
      # - That dependency used to be required, so it may have been cached.
      #   When restoring the venv packages from cache, that dependency gets included.
      # - Tests pass, because the dependency is present even though it wasn't specified.
      # - The package is published, and it breaks on the missing dependency when
      #   used in the real world.
      - name: Set up Python + uv
        uses: "./.github/actions/uv_setup"
        id: setup-python
        with:
          python-version: ${{ env.PYTHON_VERSION }}
      - uses: actions/download-artifact@v8
        with:
          name: dist
          path: ${{ env.WORKING_DIR }}/dist/
      - name: Verify CLI pins latest SDK version
        if: needs.build.outputs.pkg-name == 'deepagents-cli' && !inputs.dangerous-skip-sdk-pin-check
        run: |
          # Extract the SDK's own version from its pyproject.toml.
          SDK_VERSION=$(sed -nE 's/^version = "([^"]*)".*/\1/p' libs/deepagents/pyproject.toml | head -1)
          if [ -z "$SDK_VERSION" ]; then
            echo "::error file=libs/deepagents/pyproject.toml::Failed to extract SDK version. Expected a line matching: version = \"X.Y.Z\""
            exit 1
          fi
          # Extract the exact SDK version the CLI pins as a dependency.
          CLI_SDK_PIN=$(sed -nE 's/.*deepagents==([0-9]+\.[0-9]+\.[0-9]+).*/\1/p' libs/cli/pyproject.toml | head -1)
          if [ -z "$CLI_SDK_PIN" ]; then
            echo "::error file=libs/cli/pyproject.toml::Failed to extract CLI SDK pin. Expected a dependency matching: deepagents==X.Y.Z"
            exit 1
          fi
          if [ "$SDK_VERSION" != "$CLI_SDK_PIN" ]; then
            echo "::error::CLI SDK pin does not match SDK version!"
            echo "SDK version (libs/deepagents/pyproject.toml): $SDK_VERSION"
            echo "CLI SDK pin (libs/cli/pyproject.toml): $CLI_SDK_PIN"
            echo ""
            echo "Update the deepagents dependency in libs/cli/pyproject.toml to deepagents==$SDK_VERSION"
            echo "Or re-run with 'dangerous-skip-sdk-pin-check' enabled to bypass."
            exit 1
          else
            echo "CLI SDK pin matches SDK version: $SDK_VERSION"
          fi
      - name: Import dist package
        shell: bash
        working-directory: ${{ env.WORKING_DIR }}
        env:
          PKG_NAME: ${{ needs.build.outputs.pkg-name }}
          VERSION: ${{ needs.build.outputs.version }}
        # Installs the freshly-built wheel from the downloaded dist/ artifact
        # into a clean venv and smoke-tests that the top-level module imports.
        #
        # NOTE(review): this step installs the local wheel directly; it does not
        # pull from test PyPI and performs no retry. If test-PyPI propagation
        # semantics are ever needed here (extra index + delayed retry), they
        # would have to be added — confirm intent before relying on either.
        run: |
          uv venv
          VIRTUAL_ENV=.venv uv pip install dist/*.whl
          # Replace all dashes in the package name with underscores,
          # since that's how Python imports packages with dashes in the name.
          IMPORT_NAME="$(echo "$PKG_NAME" | sed s/-/_/g)"
          uv run python -c "import $IMPORT_NAME; print(dir($IMPORT_NAME))"
      - name: Import test dependencies
        run: uv sync --group test
        working-directory: ${{ env.WORKING_DIR }}
      # Overwrite the local version of the package with the built version
      - name: Import published package (again)
        working-directory: ${{ env.WORKING_DIR }}
        shell: bash
        env:
          PKG_NAME: ${{ needs.build.outputs.pkg-name }}
          VERSION: ${{ needs.build.outputs.version }}
        run: |
          VIRTUAL_ENV=.venv uv pip install dist/*.whl
      - name: Run unit tests
        run: make test
        working-directory: ${{ env.WORKING_DIR }}
      - name: Run integration tests
        # Only run integration tests if they exist (currently only for deepagents package)
        if: false # Temporarily disabled
        run: make integration_test || echo "No integration tests found, skipping..."
        working-directory: ${{ env.WORKING_DIR }}
  publish:
    # Publishes the package to PyPI
    needs:
      - setup
      - build
      - test-pypi-publish
      - pre-release-checks
    runs-on: ubuntu-latest
    permissions:
      # This permission is used for trusted publishing:
      # https://blog.pypi.org/posts/2023-04-20-introducing-trusted-publishers/
      #
      # Trusted publishing has to also be configured on PyPI for each package:
      # https://docs.pypi.org/trusted-publishers/adding-a-publisher/
      id-token: write
    env:
      WORKING_DIR: ${{ needs.setup.outputs.working-dir }}
    defaults:
      run:
        working-directory: ${{ env.WORKING_DIR }}
    steps:
      - uses: actions/checkout@v6
      - name: Set up Python + uv
        uses: "./.github/actions/uv_setup"
        with:
          python-version: ${{ env.PYTHON_VERSION }}
      # Publish the exact artifacts produced by the `build` job.
      - uses: actions/download-artifact@v8
        with:
          name: dist
          path: ${{ env.WORKING_DIR }}/dist/
      - name: Publish package distributions to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          packages-dir: ${{ env.WORKING_DIR }}/dist/
          verbose: true
          print-hash: true
          # Temp workaround since attestations are on by default as of gh-action-pypi-publish v1.11.0
          attestations: false
  # Create GitHub release after checks pass
  mark-release:
    needs:
      - setup
      - build
      - release-notes
      - test-pypi-publish
      - pre-release-checks
      - publish
    if: always() && needs.pre-release-checks.result == 'success' && needs.publish.result == 'success'
    runs-on: ubuntu-latest
    permissions:
      # This permission is needed by `ncipollo/release-action` to
      # create the GitHub release/tag
      contents: write
      # This permission is needed to update release PR labels
      pull-requests: write
    env:
      WORKING_DIR: ${{ needs.setup.outputs.working-dir }}
    defaults:
      run:
        working-directory: ${{ env.WORKING_DIR }}
    steps:
      - uses: actions/checkout@v6
      - name: Set up Python + uv
        uses: "./.github/actions/uv_setup"
        with:
          python-version: ${{ env.PYTHON_VERSION }}
      - uses: actions/download-artifact@v8
        with:
          name: dist
          path: ${{ env.WORKING_DIR }}/dist/
      - name: Create Release
        uses: ncipollo/release-action@v1
        with:
          # Attach the built distributions to the GitHub release.
          artifacts: "${{ env.WORKING_DIR }}/dist/*"
          token: ${{ secrets.GITHUB_TOKEN }}
          generateReleaseNotes: false
          tag: ${{ needs.build.outputs.pkg-name }}==${{ needs.build.outputs.version }}
          body: ${{ needs.release-notes.outputs.release-body }}
          commit: ${{ github.sha }}
          # Only the core `deepagents` package is marked as the "latest" release.
          makeLatest: ${{ needs.build.outputs.pkg-name == 'deepagents' }}
          draft: false
      # Mark the release PR as tagged so release-please knows it's been released
      # This is required because skip-github-release is true in release-please config
      - name: Update release PR label
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          PKG_NAME: ${{ needs.build.outputs.pkg-name }}
        run: |
          UPDATED=false
          # Try 1: find PR associated with this commit
          PR_NUMBER=$(gh api "/repos/${{ github.repository }}/commits/${{ github.sha }}/pulls" --jq '.[0].number // empty' 2>/dev/null) || PR_NUMBER=""
          if [ -n "$PR_NUMBER" ]; then
            HAS_PENDING=$(gh pr view "$PR_NUMBER" --json labels --jq '.labels[].name' | grep -q "autorelease: pending" && echo "true" || echo "false")
            if [ "$HAS_PENDING" = "true" ]; then
              echo "Found release PR #$PR_NUMBER with 'autorelease: pending', updating labels..."
              if gh pr edit "$PR_NUMBER" --remove-label "autorelease: pending" --add-label "autorelease: tagged"; then
                UPDATED=true
              else
                echo "::warning::Failed to update labels on PR #$PR_NUMBER via commit lookup. Falling through to label search..."
              fi
            else
              echo "PR #$PR_NUMBER (from commit lookup) is not the release PR, falling through to label search..."
            fi
          else
            echo "No PR found via commit ${{ github.sha }}, falling through to label search..."
          fi
          # Try 2: fallback label search when commit-based lookup didn't find the release PR.
          # This handles manual dispatch where github.sha may not be the merge commit
          # (e.g., other commits landed on main between the merge and the manual trigger).
          if [ "$UPDATED" = "false" ]; then
            PR_NUMBER=$(gh pr list --repo "${{ github.repository }}" \
              --state merged \
              --label "autorelease: pending" \
              --label "release" \
              --search "\"release($PKG_NAME)\" in:title" \
              --json number --jq '.[0].number // empty') || {
                # gh pr list itself failed; surface a manual remediation path
                # rather than failing the whole release.
                echo "::warning::gh pr list failed. Label swap could not be performed automatically."
                echo "Manual fix: gh pr edit <PR_NUMBER> --remove-label 'autorelease: pending' --add-label 'autorelease: tagged'"
                exit 0
              }
            if [ -n "$PR_NUMBER" ]; then
              echo "Found release PR #$PR_NUMBER via label search, updating labels..."
              if ! gh pr edit "$PR_NUMBER" --remove-label "autorelease: pending" --add-label "autorelease: tagged"; then
                echo "::warning::Failed to update labels on PR #$PR_NUMBER. Manual fix required."
                echo "Run: gh pr edit $PR_NUMBER --remove-label 'autorelease: pending' --add-label 'autorelease: tagged'"
              fi
            else
              echo "::warning::No release PR with 'autorelease: pending' found for $PKG_NAME. Manual label update may be required."
            fi
          fi
================================================
FILE: .github/workflows/require_issue_link.yml
================================================
# Require external PRs to link to an approved issue or discussion using
# GitHub auto-close keywords (Fixes #NNN, Closes #NNN, Resolves #NNN),
# AND require that the PR author is assigned to the linked issue.
#
# - Reacts to the "external" label applied by pr_labeler.yml,
# avoiding a duplicate org membership check.
# - Also re-checks on PR edits/reopens for PRs that already have the label.
# - Bypasses the check for PRs with the "trusted-contributor" label.
# - Validates the PR author is an assignee on at least one linked issue.
# - Adds a "missing-issue-link" label on failure; removes it on pass.
# - Automatically reopens PRs that were closed by this workflow once the
# check passes (e.g. author edits the body to add a valid issue link).
# - Respects maintainer reopens: if an org member manually reopens a
# previously auto-closed PR, enforcement is skipped so it stays open.
# - Posts (or updates) a comment explaining the requirement on failure.
# - Cancels all other in-progress/queued CI runs for the PR on closure.
# - Deduplicates comments via an HTML marker so re-runs don't spam.
#
# Dependency: pr_labeler.yml must run first to apply the "external" label
# on new PRs. This workflow chains off that classification via the "labeled"
# event. It does NOT trigger on "opened" because new PRs have no labels yet,
# so the job-level gate would always skip — producing noisy "Skipped" checks.
name: Require Issue Link
on:
pull_request_target:
types: [edited, reopened, labeled]
# ──────────────────────────────────────────────────────────────────────────────
# Enforcement gate: set to 'true' to activate the issue link requirement.
# When 'false', the workflow still runs the check logic (useful for dry-run
# visibility) but will NOT label, comment, close, or fail PRs.
# ──────────────────────────────────────────────────────────────────────────────
env:
ENFORCE_ISSUE_LINK: 'true'
permissions:
contents: read
jobs:
check-issue-link:
# Run when the "external" label is added, or on edit/reopen if already labeled.
# Skip entirely when the PR already carries "trusted-contributor".
if: >-
!contains(github.event.pull_request.labels.*.name, 'trusted-contributor') &&
(
(github.event.action == 'labeled' && github.event.label.name == 'external') ||
(github.event.action != 'labeled' && contains(github.event.pull_request.labels.*.name, 'external'))
)
runs-on: ubuntu-latest
permissions:
actions: write
pull-requests: write
steps:
- name: Check for issue link and assignee
id: check-link
uses: actions/github-script@v8
with:
script: |
const { owner, repo } = context.repo;
const prNumber = context.payload.pull_request.number;
// If a maintainer (org member) manually reopened a PR that was
// previously auto-closed by this workflow (indicated by the
// "missing-issue-link" label), respect that decision and skip
// enforcement. Without this, the workflow would immediately
// re-close the PR on the "reopened" event.
const prLabels = context.payload.pull_request.labels.map(l => l.name);
if (context.payload.action === 'reopened' && prLabels.includes('missing-issue-link')) {
const sender = context.payload.sender?.login;
if (!sender) {
throw new Error('Unexpected: reopened event has no sender — cannot check org membership');
}
try {
const { data: membership } = await github.rest.orgs.getMembershipForUser({
org: 'langchain-ai',
username: sender,
});
if (membership.state === 'active') {
console.log(`Maintainer ${sender} reopened PR #${prNumber} — skipping enforcement`);
core.setOutput('has-link', 'true');
core.setOutput('is-assigned', 'true');
return;
} else {
console.log(`${sender} is an org member but state is "${membership.state}" — proceeding with check`);
}
} catch (e) {
if (e.status === 404) {
console.log(`${sender} is not an org member — proceeding with check`);
} else {
const status = e.status ?? 'unknown';
throw new Error(
`Membership check failed for ${sender} (HTTP ${status}): ${e.message}`,
);
}
}
}
// Fetch live labels to handle the race where "external" fires
// before "trusted-contributor" appears in the event payload.
const { data: liveLabels } = await github.rest.issues.listLabelsOnIssue({
owner, repo, issue_number: prNumber,
});
if (liveLabels.some(l => l.name === 'trusted-contributor')) {
console.log('PR has trusted-contributor label — bypassing issue link check');
core.setOutput('has-link', 'true');
core.setOutput('is-assigned', 'true');
return;
}
const body = context.payload.pull_request.body || '';
const pattern = /(?:close[sd]?|fix(?:e[sd])?|resolve[sd]?)\s*#(\d+)/gi;
const matches = [...body.matchAll(pattern)];
if (matches.length === 0) {
console.log('No issue link found in PR body');
core.setOutput('has-link', 'false');
core.setOutput('is-assigned', 'false');
return;
}
const issues = matches.map(m => `#${m[1]}`).join(', ');
console.log(`Found issue link(s): ${issues}`);
core.setOutput('has-link', 'true');
// Check whether the PR author is assigned to at least one linked issue
const prAuthor = context.payload.pull_request.user.login;
const MAX_ISSUES = 5;
const allIssueNumbers = [...new Set(matches.map(m => parseInt(m[1], 10)))];
const issueNumbers = allIssueNumbers.slice(0, MAX_ISSUES);
if (allIssueNumbers.length > MAX_ISSUES) {
core.warning(
`PR references ${allIssueNumbers.length} issues — only checking the first ${MAX_ISSUES}`,
);
}
let assignedToAny = false;
for (const num of issueNumbers) {
try {
const { data: issue } = await github.rest.issues.get({
owner, repo, issue_number: num,
});
const assignees = issue.assignees.map(a => a.login.toLowerCase());
if (assignees.includes(prAuthor.toLowerCase())) {
console.log(`PR author "${prAuthor}" is assigned to #${num}`);
assignedToAny = true;
break;
} else {
console.log(`PR author "${prAuthor}" is NOT assigned to #${num} (assignees: ${assignees.join(', ') || 'none'})`);
}
} catch (error) {
if (error.status === 404) {
console.log(`Issue #${num} not found — skipping`);
} else {
// Non-404 errors (rate limit, server error) must not be
// silently skipped — they could cause false enforcement
// (closing a legitimate PR whose assignment can't be verified).
throw new Error(
`Cannot verify assignee for issue #${num} (${error.status}): ${error.message}`,
);
}
}
}
core.setOutput('is-assigned', assignedToAny ? 'true' : 'false');
- name: Add missing-issue-link label
if: >-
env.ENFORCE_ISSUE_LINK == 'true' &&
(steps.check-link.outputs.has-link != 'true' || steps.check-link.outputs.is-assigned != 'true')
uses: actions/github-script@v8
with:
script: |
const { owner, repo } = context.repo;
const prNumber = context.payload.pull_request.number;
const labelName = 'missing-issue-link';
// Ensure the label exists (no checkout/shared helper available)
try {
await github.rest.issues.getLabel({ owner, repo, name: labelName });
} catch (e) {
if (e.status !== 404) throw e;
try {
await github.rest.issues.createLabel({
owner, repo, name: labelName, color: 'b76e79',
});
} catch (createErr) {
if (createErr.status !== 422) throw createErr;
}
}
await github.rest.issues.addLabels({
owner, repo, issue_number: prNumber, labels: [labelName],
});
- name: Remove missing-issue-link label and reopen PR
if: >-
env.ENFORCE_ISSUE_LINK == 'true' &&
steps.check-link.outputs.has-link == 'true' && steps.check-link.outputs.is-assigned == 'true'
uses: actions/github-script@v8
with:
script: |
const { owner, repo } = context.repo;
const prNumber = context.payload.pull_request.number;
try {
await github.rest.issues.removeLabel({
owner, repo, issue_number: prNumber, name: 'missing-issue-link',
});
} catch (error) {
if (error.status !== 404) throw error;
}
// Reopen if this workflow previously closed the PR. We check the
// event payload labels (not live labels) because we already removed
// missing-issue-link above; the payload still reflects pre-step state.
const labels = context.payload.pull_request.labels.map(l => l.name);
if (context.payload.pull_request.state === 'closed' && labels.includes('missing-issue-link')) {
await github.rest.pulls.update({
owner,
repo,
pull_number: prNumber,
state: 'open',
});
console.log(`Reopened PR #${prNumber}`);
}
      - name: Post comment, close PR, and fail
        if: >-
          env.ENFORCE_ISSUE_LINK == 'true' &&
          (steps.check-link.outputs.has-link != 'true' || steps.check-link.outputs.is-assigned != 'true')
        uses: actions/github-script@v8
        with:
          script: |
            const { owner, repo } = context.repo;
            const prNumber = context.payload.pull_request.number;
            // The ${{ }} expressions are substituted by the Actions runner
            // BEFORE this script executes, so these are plain string literals
            // here ('true'/'false'), safe to compare directly.
            const hasLink = '${{ steps.check-link.outputs.has-link }}' === 'true';
            const isAssigned = '${{ steps.check-link.outputs.is-assigned }}' === 'true';
            // Hidden HTML comment used to recognize our own comment on later runs.
            const marker = '<!-- require-issue-link -->';
            // Pick the message for whichever requirement failed: no issue link
            // at all, or a link whose issue the author is not assigned to.
            let lines;
            if (!hasLink) {
              lines = [
                marker,
                '**This PR has been automatically closed** because it does not link to an approved issue.',
                '',
                'All external contributions must reference an approved issue or discussion. Please:',
                '1. Find or [open an issue](https://github.com/' + owner + '/' + repo + '/issues/new/choose) describing the change',
                '2. Wait for a maintainer to approve and assign you',
                '3. Add `Fixes #<issue_number>`, `Closes #<issue_number>`, or `Resolves #<issue_number>` to your PR description and the PR will be reopened automatically',
              ];
            } else {
              lines = [
                marker,
                '**This PR has been automatically closed** because you are not assigned to the linked issue.',
                '',
                'External contributors must be assigned to an issue before opening a PR for it. Please:',
                '1. Comment on the linked issue to request assignment from a maintainer',
                '2. Once assigned, edit your PR description and the PR will be reopened automatically',
              ];
            }
            const body = lines.join('\n');
            // Deduplicate: check for existing comment with the marker
            const comments = await github.paginate(
              github.rest.issues.listComments,
              { owner, repo, issue_number: prNumber, per_page: 100 },
            );
            const existing = comments.find(c => c.body && c.body.includes(marker));
            if (!existing) {
              await github.rest.issues.createComment({
                owner,
                repo,
                issue_number: prNumber,
                body,
              });
              console.log('Posted requirement comment');
            } else if (existing.body !== body) {
              // Same marker but different text — e.g. the failure reason
              // changed between runs — so refresh the comment in place.
              await github.rest.issues.updateComment({
                owner,
                repo,
                comment_id: existing.id,
                body,
              });
              console.log('Updated existing comment with new message');
            } else {
              console.log('Comment already exists — skipping');
            }
            // Close the PR
            if (context.payload.pull_request.state === 'open') {
              await github.rest.pulls.update({
                owner,
                repo,
                pull_number: prNumber,
                state: 'closed',
              });
              console.log(`Closed PR #${prNumber}`);
            }
            // Cancel all other in-progress and queued workflow runs for this PR.
            // Best-effort: individual cancellation failures are logged, not fatal.
            const headSha = context.payload.pull_request.head.sha;
            for (const status of ['in_progress', 'queued']) {
              const runs = await github.paginate(
                github.rest.actions.listWorkflowRunsForRepo,
                { owner, repo, head_sha: headSha, status, per_page: 100 },
              );
              for (const run of runs) {
                // Never cancel ourselves — this step still has work to do.
                if (run.id === context.runId) continue;
                try {
                  await github.rest.actions.cancelWorkflowRun({
                    owner, repo, run_id: run.id,
                  });
                  console.log(`Cancelled ${status} run ${run.id} (${run.name})`);
                } catch (err) {
                  console.log(`Could not cancel run ${run.id}: ${err.message}`);
                }
              }
            }
            // Mark this run failed so the PR shows a red check with the reason.
            const reason = !hasLink
              ? 'PR must reference an issue using auto-close keywords (e.g., "Fixes #123").'
              : 'PR author must be assigned to the linked issue.';
            core.setFailed(reason);
================================================
FILE: .github/workflows/sync_priority_labels.yml
================================================
# Sync priority labels (p0–p3) from linked issues to PRs.
#
# Triggers:
#   1. PR opened/edited — parse issue links, copy priority label from issue(s)
#   2. Issue labeled/unlabeled — find open PRs that reference the issue, update
#   3. Manual dispatch — backfill open PRs (up to max_items)
#
# Priority labels are mutually exclusive on a PR. When a PR links to multiple
# issues with different priorities, the highest wins (p0 > p1 > p2 > p3).
name: Sync Priority Labels

on:
  # pull_request_target is safe here: we never check out or execute the
  # PR's code — only read the PR body and manage labels.
  pull_request_target:
    types: [opened, edited]
  issues:
    types: [labeled, unlabeled]
  workflow_dispatch:
    inputs:
      # Caps API usage during a manual backfill run.
      max_items:
        description: "Maximum number of open PRs to process"
        default: "200"
        type: string

permissions:
  contents: read

# Serialize per PR (on PR events), per issue (on issue events), or
# globally (backfill). Note: two different issues that both link to the
# same PR may still race; both jobs re-derive the full correct state, so
# last-writer-wins converges.
concurrency:
  group: >-
    ${{ github.workflow }}-${{
      github.event_name == 'pull_request_target'
      && format('pr-{0}', github.event.pull_request.number)
      || github.event_name == 'issues'
      && format('issue-{0}', github.event.issue.number)
      || 'backfill'
    }}
  # Backfill runs are never cancelled mid-flight; event-driven runs are
  # superseded by newer events for the same PR/issue.
  cancel-in-progress: ${{ github.event_name != 'workflow_dispatch' }}

jobs:
  # ── PR opened/edited: copy priority from linked issue(s) ──────────────
  sync-from-issue:
    if: github.event_name == 'pull_request_target'
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write
      issues: write
    steps:
- name: Sync priority label to PR
uses: actions/github-script@v8
with:
script: |
const { owner, repo } = context.repo;
const prNumber = context.payload.pull_request.number;
const body = context.payload.pull_request.body || '';
const PRIORITY_LABELS = ['p0', 'p1', 'p2', 'p3'];
const LINK_RE = /(?:close[sd]?|fix(?:e[sd])?|resolve[sd]?)\s*#(\d+)/gi;
// ── Helpers ──
function parseIssueNumbers(text) {
return [...new Set(
[...text.matchAll(LINK_RE)].map(m => parseInt(m[1], 10)),
)];
}
async function getIssueLabels(num) {
try {
const { data } = await github.rest.issues.get({
owner, repo, issue_number: num,
});
return data.labels.map(l => l.name);
} catch (e) {
if (e.status === 404) return null;
throw e;
}
}
function highestPriority(labelSets) {
let best = null;
for (const labels of labelSets) {
if (!labels) continue;
const idx = PRIORITY_LABELS.findIndex(p => labels.includes(p));
if (idx !== -1 && (best === null || idx < best)) best = idx;
}
return best;
}
async function getPrLabelNames(num) {
return (await github.paginate(
github.rest.issues.listLabelsOnIssue,
{ owner, repo, issue_number: num, per_page: 100 },
)).map(l => l.name);
}
async function removeLabel(num, name) {
try {
await github.rest.issues.removeLabel({
owner, repo, issue_number: num, name,
});
console.log(`Removed '${name}' from PR #${num}`);
} catch (e) {
if (e.status !== 404) throw e;
}
}
async function ensureLabel(name) {
try {
await github.rest.issues.getLabel({ owner, repo, name });
} catch (e) {
if (e.status !== 404) throw e;
try {
await github.rest.issues.createLabel({
owner, repo, name, color: 'b76e79',
});
} catch (createErr) {
if (createErr.status !== 422) throw createErr;
}
}
}
async function syncPrLabels(prNum, targetLabel) {
const prLabels = await getPrLabelNames(prNum);
// Remove stale priority labels
for (const p of PRIORITY_LABELS) {
if (prLabels.includes(p) && p !== targetLabel) {
await removeLabel(prNum, p);
}
}
if (!targetLabel) return;
if (prLabels.includes(targetLabel)) {
console.log(`PR #${prNum} already has '${targetLabel}'`);
return;
}
await ensureLabel(targetLabel);
await github.rest.issues.addLabels({
owner, repo, issue_number: prNum, labels: [targetLabel],
});
console.log(`Applied '${targetLabel}' to PR #${prNum}`);
}
// ── Main ──
const issueNumbers = parseIssueNumbers(body);
if (issueNumbers.length === 0) {
console.log('No issue links found in PR body');
return;
}
console.log(`Found linked issues: ${issueNumbers.map(n => '#' + n).join(', ')}`);
const labelSets = await Promise.all(issueNumbers.map(getIssueLabels));
const best = highestPriority(labelSets);
const targetLabel = best !== null ? PRIORITY_LABELS[best] : null;
if (targetLabel) {
console.log(`Highest priority across linked issues: ${targetLabel}`);
} else {
console.log('No priority labels found on linked issues');
}
await syncPrLabels(prNumber, targetLabel);
  # ── Issue labeled/unlabeled: propagate to PRs that link to it ─────────
  sync-to-prs:
    # Only the four priority labels matter; every other label event is ignored.
    if: >-
      github.event_name == 'issues' &&
      contains(fromJSON('["p0","p1","p2","p3"]'), github.event.label.name)
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write
      issues: write
    steps:
- name: Propagate priority label to linked PRs
uses: actions/github-script@v8
with:
script: |
const { owner, repo } = context.repo;
const issueNumber = context.payload.issue.number;
const action = context.payload.action;
const PRIORITY_LABELS = ['p0', 'p1', 'p2', 'p3'];
const LINK_RE = /(?:close[sd]?|fix(?:e[sd])?|resolve[sd]?)\s*#(\d+)/gi;
console.log(`Issue #${issueNumber} ${action} with '${context.payload.label.name}'`);
// ── Helpers ──
function parseIssueNumbers(text) {
return [...new Set(
[...text.matchAll(LINK_RE)].map(m => parseInt(m[1], 10)),
)];
}
async function getIssueLabels(num) {
try {
const { data } = await github.rest.issues.get({
owner, repo, issue_number: num,
});
return data.labels.map(l => l.name);
} catch (e) {
if (e.status === 404) return null;
throw e;
}
}
function highestPriority(labelSets) {
let best = null;
for (const labels of labelSets) {
if (!labels) continue;
const idx = PRIORITY_LABELS.findIndex(p => labels.includes(p));
if (idx !== -1 && (best === null || idx < best)) best = idx;
}
return best;
}
async function getPrLabelNames(num) {
return (await github.paginate(
github.rest.issues.listLabelsOnIssue,
{ owner, repo, issue_number: num, per_page: 100 },
)).map(l => l.name);
}
async function removeLabel(num, name) {
try {
await github.rest.issues.removeLabel({
owner, repo, issue_number: num, name,
});
console.log(`Removed '${name}' from PR #${num}`);
} catch (e) {
if (e.status !== 404) throw e;
}
}
async function ensureLabel(name) {
try {
await github.rest.issues.getLabel({ owner, repo, name });
} catch (e) {
if (e.status !== 404) throw e;
try {
await github.rest.issues.createLabel({
owner, repo, name, color: 'b76e79',
});
} catch (createErr) {
if (createErr.status !== 422) throw createErr;
}
}
}
async function syncPrLabels(prNum, targetLabel) {
const prLabels = await getPrLabelNames(prNum);
for (const p of PRIORITY_LABELS) {
if (prLabels.includes(p) && p !== targetLabel) {
await removeLabel(prNum, p);
}
}
if (!targetLabel) {
console.log(`No priority label remaining for PR #${prNum}`);
return;
}
if (prLabels.includes(targetLabel)) {
console.log(`PR #${prNum} already has '${targetLabel}'`);
return;
}
await ensureLabel(targetLabel);
await github.rest.issues.addLabels({
owner, repo, issue_number: prNum, labels: [targetLabel],
});
console.log(`Applied '${targetLabel}' to PR #${prNum}`);
}
// ── Find open PRs that reference this issue ──
// GitHub search treats the quoted number as a substring match
// across title, body, and comments — low issue numbers (e.g. #1)
// may return false positives. The specificLinkRe filter below
// prunes them, but legitimate PRs could be pushed out of the
// result page for very popular low numbers.
const specificLinkRe = new RegExp(
`(?:close[sd]?|fix(?:e[sd])?|resolve[sd]?)\\s*#${issueNumber}\\b`,
'i',
);
let prs;
try {
const result = await github.rest.search.issuesAndPullRequests({
q: `repo:${owner}/${repo} is:pr is:open "${issueNumber}"`,
per_page: 100,
});
prs = result.data.items;
} catch (e) {
if (e.status === 422) {
core.warning(`Search for PRs linking to #${issueNumber} returned 422 — skipping`);
return;
}
throw e;
}
const linkedPRs = prs.filter(pr => specificLinkRe.test(pr.body || ''));
if (linkedPRs.length === 0) {
console.log(`No open PRs link to issue #${issueNumber}`);
return;
}
console.log(`Found ${linkedPRs.length} PR(s) linking to #${issueNumber}: ${linkedPRs.map(p => '#' + p.number).join(', ')}`);
// Pre-fetch the triggering issue's labels (post-event state)
const triggeringLabels = await getPrLabelNames(issueNumber);
// ── Resolve and sync each linked PR ──
let failures = 0;
for (const pr of linkedPRs) {
try {
// A PR may link to multiple issues — re-derive the correct
// priority by checking all linked issues.
const allIssueNumbers = parseIssueNumbers(pr.body || '');
const labelSets = await Promise.all(
allIssueNumbers.map(num =>
num === issueNumber
? Promise.resolve(triggeringLabels)
: getIssueLabels(num),
),
);
const best = highestPriority(labelSets);
const targetLabel = best !== null ? PRIORITY_LABELS[best] : null;
await syncPrLabels(pr.number, targetLabel);
} catch (e) {
failures++;
core.warning(`Failed to sync PR #${pr.number}: ${e.message}`);
}
}
if (failures > 0) {
core.setFailed(`${failures} PR(s) failed to sync — check warnings above`);
}
# ── Manual backfill: sync priority labels on open PRs (up to max_items)
backfill:
if: github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
permissions:
pull-requests: write
issues: write
steps:
- name: Backfill priority labels on open PRs
uses: actions/github-script@v8
with:
script: |
const { owner, repo } = context.repo;
const rawMax = '${{ inputs.max_items }}';
const maxItems = parseInt(rawMax, 10);
if (isNaN(maxItems) || maxItems <= 0) {
core.setFailed(`Invalid max_items: "${rawMax}" — must be a positive integer`);
return;
}
const PRIORITY_LABELS = ['p0', 'p1', 'p2', 'p3'];
const LINK_RE = /(?:close[sd]?|fix(?:e[sd])?|resolve[sd]?)\s*#(\d+)/gi;
// ── Helpers ──
function parseIssueNumbers(text) {
return [...new Set(
[...text.matchAll(LINK_RE)].map(m => parseInt(m[1], 10)),
)];
}
async function getIssueLabels(num) {
try {
const { data } = await github.rest.issues.get({
owner, repo, issue_number: num,
});
return data.labels.map(l => l.name);
} catch (e) {
if (e.status === 404) return null;
throw e;
}
}
function highestPriority(labelSets) {
let best = null;
for (const labels of labelSets) {
if (!labels) continue;
const idx = PRIORITY_LABELS.findIndex(p => labels.includes(p));
if (idx !== -1 && (best === null || idx < best)) best = idx;
}
return best;
}
async function getPrLabelNames(num) {
return (await github.paginate(
github.rest.issues.listLabelsOnIssue,
{ owner, repo, issue_number: num, per_page: 100 },
)).map(l => l.name);
}
async function removeLabel(num, name) {
try {
await github.rest.issues.removeLabel({
owner, repo, issue_number: num, name,
});
} catch (e) {
if (e.status !== 404) throw e;
}
}
async function ensureLabel(name) {
try {
await github.rest.issues.getLabel({ owner, repo, name });
} catch (e) {
if (e.status !== 404) throw e;
try {
await github.rest.issues.createLabel({
owner, repo, name, color: 'b76e79',
});
} catch (createErr) {
if (createErr.status !== 422) throw createErr;
}
}
}
// ── Main ──
const prs = await github.paginate(github.rest.pulls.list, {
owner, repo, state: 'open', per_page: 100,
});
let processed = 0;
let updated = 0;
let failures = 0;
for (const pr of prs) {
if (processed >= maxItems) break;
processed++;
try {
const issueNumbers = parseIssueNumbers(pr.body || '');
if (issueNumbers.length === 0) continue;
const labelSets = await Promise.all(issueNumbers.map(getIssueLabels));
const best = highestPriority(labelSets);
const targetLabel = best !== null ? PRIORITY_LABELS[best] : null;
const prLabels = await getPrLabelNames(pr.number);
const currentPriority = PRIORITY_LABELS.find(p => prLabels.includes(p)) || null;
if (currentPriority === targetLabel) {
console.log(`PR #${pr.number}: already correct (${targetLabel || 'none'})`);
continue;
}
// Remove stale priority labels
for (const p of PRIORITY_LABELS) {
if (prLabels.includes(p) && p !== targetLabel) {
await removeLabel(pr.number, p);
}
}
gitextract_1kmmfaan/ ├── .github/ │ ├── CODEOWNERS │ ├── ISSUE_TEMPLATE/ │ │ ├── bug-report.yml │ │ ├── config.yml │ │ ├── feature-request.yml │ │ └── privileged.yml │ ├── PULL_REQUEST_TEMPLATE.md │ ├── RELEASING.md │ ├── actions/ │ │ └── uv_setup/ │ │ └── action.yml │ ├── dependabot.yml │ ├── scripts/ │ │ ├── aggregate_evals.py │ │ ├── check_extras_sync.py │ │ ├── check_version_equality.py │ │ ├── models.py │ │ ├── pr-labeler-config.json │ │ └── pr-labeler.js │ └── workflows/ │ ├── _benchmark.yml │ ├── _lint.yml │ ├── _test.yml │ ├── auto-label-by-package.yml │ ├── check_extras_sync.yml │ ├── check_lockfiles.yml │ ├── check_sdk_pin.yml │ ├── check_versions.yml │ ├── ci.yml │ ├── deepagents-example.yml │ ├── evals.yml │ ├── harbor.yml │ ├── pr_labeler.yml │ ├── pr_labeler_backfill.yml │ ├── pr_lint.yml │ ├── release-please.yml │ ├── release.yml │ ├── require_issue_link.yml │ ├── sync_priority_labels.yml │ └── tag-external-issues.yml ├── .gitignore ├── .markdownlint.json ├── .mcp.json ├── .pre-commit-config.yaml ├── .release-please-manifest.json ├── .vscode/ │ ├── extensions.json │ └── settings.json ├── AGENTS.md ├── LICENSE ├── Makefile ├── README.md ├── action.yml ├── examples/ │ ├── README.md │ ├── content-builder-agent/ │ │ ├── .gitignore │ │ ├── AGENTS.md │ │ ├── README.md │ │ ├── content_writer.py │ │ ├── pyproject.toml │ │ ├── skills/ │ │ │ ├── blog-post/ │ │ │ │ └── SKILL.md │ │ │ └── social-media/ │ │ │ └── SKILL.md │ │ └── subagents.yaml │ ├── deep_research/ │ │ ├── README.md │ │ ├── agent.py │ │ ├── langgraph.json │ │ ├── pyproject.toml │ │ ├── research_agent/ │ │ │ ├── __init__.py │ │ │ ├── prompts.py │ │ │ └── tools.py │ │ ├── research_agent.ipynb │ │ └── utils.py │ ├── downloading_agents/ │ │ └── README.md │ ├── nvidia_deep_agent/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── langgraph.json │ │ ├── pyproject.toml │ │ ├── skills/ │ │ │ ├── cudf-analytics/ │ │ │ │ └── SKILL.md │ │ │ ├── cuml-machine-learning/ │ │ │ │ └── SKILL.md │ │ │ ├── 
data-visualization/ │ │ │ │ └── SKILL.md │ │ │ └── gpu-document-processing/ │ │ │ └── SKILL.md │ │ └── src/ │ │ ├── AGENTS.md │ │ ├── __init__.py │ │ ├── agent.py │ │ ├── backend.py │ │ ├── prompts.py │ │ └── tools.py │ ├── ralph_mode/ │ │ ├── README.md │ │ └── ralph_mode.py │ └── text-to-sql-agent/ │ ├── .gitignore │ ├── AGENTS.md │ ├── README.md │ ├── agent.py │ ├── pyproject.toml │ └── skills/ │ ├── query-writing/ │ │ └── SKILL.md │ └── schema-exploration/ │ └── SKILL.md ├── libs/ │ ├── README.md │ ├── acp/ │ │ ├── Makefile │ │ ├── README.md │ │ ├── deepagents_acp/ │ │ │ ├── __init__.py │ │ │ ├── __main__.py │ │ │ ├── py.typed.py │ │ │ ├── server.py │ │ │ └── utils.py │ │ ├── examples/ │ │ │ ├── __init__.py │ │ │ ├── demo_agent.py │ │ │ └── local_context.py │ │ ├── pyproject.toml │ │ ├── run_demo_agent.sh │ │ └── tests/ │ │ ├── __init__.py │ │ ├── chat_model.py │ │ ├── test_agent.py │ │ ├── test_command_allowlist.py │ │ ├── test_main.py │ │ └── test_utils.py │ ├── cli/ │ │ ├── CHANGELOG.md │ │ ├── Makefile │ │ ├── README.md │ │ ├── deepagents_cli/ │ │ │ ├── __init__.py │ │ │ ├── __main__.py │ │ │ ├── _ask_user_types.py │ │ │ ├── _cli_context.py │ │ │ ├── _debug.py │ │ │ ├── _server_config.py │ │ │ ├── _server_constants.py │ │ │ ├── _session_stats.py │ │ │ ├── _testing_models.py │ │ │ ├── _version.py │ │ │ ├── agent.py │ │ │ ├── app.py │ │ │ ├── app.tcss │ │ │ ├── ask_user.py │ │ │ ├── built_in_skills/ │ │ │ │ ├── __init__.py │ │ │ │ └── skill-creator/ │ │ │ │ ├── SKILL.md │ │ │ │ └── scripts/ │ │ │ │ ├── init_skill.py │ │ │ │ └── quick_validate.py │ │ │ ├── clipboard.py │ │ │ ├── command_registry.py │ │ │ ├── config.py │ │ │ ├── configurable_model.py │ │ │ ├── default_agent_prompt.md │ │ │ ├── editor.py │ │ │ ├── file_ops.py │ │ │ ├── hooks.py │ │ │ ├── input.py │ │ │ ├── integrations/ │ │ │ │ ├── __init__.py │ │ │ │ ├── sandbox_factory.py │ │ │ │ └── sandbox_provider.py │ │ │ ├── local_context.py │ │ │ ├── main.py │ │ │ ├── mcp_tools.py │ │ │ ├── mcp_trust.py │ 
│ │ ├── media_utils.py │ │ │ ├── model_config.py │ │ │ ├── non_interactive.py │ │ │ ├── offload.py │ │ │ ├── output.py │ │ │ ├── project_utils.py │ │ │ ├── prompts.py │ │ │ ├── py.typed │ │ │ ├── remote_client.py │ │ │ ├── server.py │ │ │ ├── server_graph.py │ │ │ ├── server_manager.py │ │ │ ├── sessions.py │ │ │ ├── skills/ │ │ │ │ ├── __init__.py │ │ │ │ ├── commands.py │ │ │ │ └── load.py │ │ │ ├── subagents.py │ │ │ ├── system_prompt.md │ │ │ ├── textual_adapter.py │ │ │ ├── tool_display.py │ │ │ ├── tools.py │ │ │ ├── ui.py │ │ │ ├── unicode_security.py │ │ │ ├── update_check.py │ │ │ └── widgets/ │ │ │ ├── __init__.py │ │ │ ├── _links.py │ │ │ ├── approval.py │ │ │ ├── ask_user.py │ │ │ ├── autocomplete.py │ │ │ ├── chat_input.py │ │ │ ├── diff.py │ │ │ ├── history.py │ │ │ ├── loading.py │ │ │ ├── mcp_viewer.py │ │ │ ├── message_store.py │ │ │ ├── messages.py │ │ │ ├── model_selector.py │ │ │ ├── status.py │ │ │ ├── thread_selector.py │ │ │ ├── tool_renderers.py │ │ │ ├── tool_widgets.py │ │ │ └── welcome.py │ │ ├── examples/ │ │ │ └── skills/ │ │ │ ├── arxiv-search/ │ │ │ │ ├── SKILL.md │ │ │ │ └── arxiv_search.py │ │ │ ├── langgraph-docs/ │ │ │ │ └── SKILL.md │ │ │ ├── skill-creator/ │ │ │ │ ├── SKILL.md │ │ │ │ └── scripts/ │ │ │ │ ├── init_skill.py │ │ │ │ └── quick_validate.py │ │ │ └── web-research/ │ │ │ └── SKILL.md │ │ ├── pyproject.toml │ │ ├── scripts/ │ │ │ ├── check_imports.py │ │ │ └── install.sh │ │ └── tests/ │ │ ├── README.md │ │ ├── integration_tests/ │ │ │ ├── __init__.py │ │ │ ├── benchmarks/ │ │ │ │ ├── __init__.py │ │ │ │ ├── test_codspeed_import_benchmarks.py │ │ │ │ └── test_startup_benchmarks.py │ │ │ ├── conftest.py │ │ │ ├── test_acp_mode.py │ │ │ ├── test_compact_resume.py │ │ │ ├── test_sandbox_factory.py │ │ │ └── test_sandbox_operations.py │ │ └── unit_tests/ │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── skills/ │ │ │ ├── __init__.py │ │ │ ├── test_commands.py │ │ │ ├── test_load.py │ │ │ └── test_skills_json.py │ │ ├── 
test_agent.py │ │ ├── test_app.py │ │ ├── test_approval.py │ │ ├── test_args.py │ │ ├── test_ask_user.py │ │ ├── test_ask_user_middleware.py │ │ ├── test_autocomplete.py │ │ ├── test_charset.py │ │ ├── test_chat_input.py │ │ ├── test_command_registry.py │ │ ├── test_compact_tool.py │ │ ├── test_config.py │ │ ├── test_configurable_model.py │ │ ├── test_debug.py │ │ ├── test_editor.py │ │ ├── test_end_to_end.py │ │ ├── test_exception_handling.py │ │ ├── test_file_ops.py │ │ ├── test_history.py │ │ ├── test_hooks.py │ │ ├── test_imports.py │ │ ├── test_input_parsing.py │ │ ├── test_local_context.py │ │ ├── test_main.py │ │ ├── test_main_acp_mode.py │ │ ├── test_main_args.py │ │ ├── test_mcp_tools.py │ │ ├── test_mcp_trust.py │ │ ├── test_mcp_viewer.py │ │ ├── test_media_utils.py │ │ ├── test_message_store.py │ │ ├── test_messages.py │ │ ├── test_model_config.py │ │ ├── test_model_selector.py │ │ ├── test_model_switch.py │ │ ├── test_non_interactive.py │ │ ├── test_offload.py │ │ ├── test_output.py │ │ ├── test_prompts.py │ │ ├── test_reload.py │ │ ├── test_remote_client.py │ │ ├── test_sandbox_factory.py │ │ ├── test_server.py │ │ ├── test_server_config.py │ │ ├── test_server_graph.py │ │ ├── test_server_helpers.py │ │ ├── test_server_manager.py │ │ ├── test_sessions.py │ │ ├── test_shell_allow_list.py │ │ ├── test_status.py │ │ ├── test_subagents.py │ │ ├── test_textual_adapter.py │ │ ├── test_thread_selector.py │ │ ├── test_token_tracker.py │ │ ├── test_ui.py │ │ ├── test_unicode_security.py │ │ ├── test_update_check.py │ │ ├── test_version.py │ │ ├── test_welcome.py │ │ └── tools/ │ │ ├── __init__.py │ │ └── test_fetch_url.py │ ├── deepagents/ │ │ ├── Makefile │ │ ├── README.md │ │ ├── deepagents/ │ │ │ ├── __init__.py │ │ │ ├── _models.py │ │ │ ├── _version.py │ │ │ ├── backends/ │ │ │ │ ├── __init__.py │ │ │ │ ├── composite.py │ │ │ │ ├── filesystem.py │ │ │ │ ├── langsmith.py │ │ │ │ ├── local_shell.py │ │ │ │ ├── protocol.py │ │ │ │ ├── sandbox.py │ │ │ │ ├── 
state.py │ │ │ │ ├── store.py │ │ │ │ └── utils.py │ │ │ ├── base_prompt.md │ │ │ ├── graph.py │ │ │ ├── middleware/ │ │ │ │ ├── __init__.py │ │ │ │ ├── _utils.py │ │ │ │ ├── async_subagents.py │ │ │ │ ├── filesystem.py │ │ │ │ ├── memory.py │ │ │ │ ├── patch_tool_calls.py │ │ │ │ ├── skills.py │ │ │ │ ├── subagents.py │ │ │ │ └── summarization.py │ │ │ └── py.typed │ │ ├── pyproject.toml │ │ ├── scripts/ │ │ │ └── check_imports.py │ │ └── tests/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── integration_tests/ │ │ │ ├── __init__.py │ │ │ ├── test_deepagents.py │ │ │ ├── test_filesystem_middleware.py │ │ │ ├── test_langsmith_sandbox.py │ │ │ └── test_subagent_middleware.py │ │ ├── unit_tests/ │ │ │ ├── __init__.py │ │ │ ├── backends/ │ │ │ │ ├── __init__.py │ │ │ │ ├── test_backwards_compat.py │ │ │ │ ├── test_composite_backend.py │ │ │ │ ├── test_composite_backend_async.py │ │ │ │ ├── test_file_format.py │ │ │ │ ├── test_filesystem_backend.py │ │ │ │ ├── test_filesystem_backend_async.py │ │ │ │ ├── test_langsmith_sandbox.py │ │ │ │ ├── test_local_shell_backend.py │ │ │ │ ├── test_protocol.py │ │ │ │ ├── test_sandbox_backend.py │ │ │ │ ├── test_state_backend.py │ │ │ │ ├── test_state_backend_async.py │ │ │ │ ├── test_store_backend.py │ │ │ │ ├── test_store_backend_async.py │ │ │ │ ├── test_timeout_compat.py │ │ │ │ └── test_utils.py │ │ │ ├── chat_model.py │ │ │ ├── middleware/ │ │ │ │ ├── __init__.py │ │ │ │ ├── test_compact_tool.py │ │ │ │ ├── test_filesystem_middleware_init.py │ │ │ │ ├── test_memory_middleware.py │ │ │ │ ├── test_memory_middleware_async.py │ │ │ │ ├── test_skills_middleware.py │ │ │ │ ├── test_skills_middleware_async.py │ │ │ │ ├── test_subagent_middleware_init.py │ │ │ │ ├── test_summarization_factory.py │ │ │ │ ├── test_summarization_middleware.py │ │ │ │ └── test_tool_schemas.py │ │ │ ├── smoke_tests/ │ │ │ │ ├── __init__.py │ │ │ │ ├── conftest.py │ │ │ │ ├── snapshots/ │ │ │ │ │ ├── custom_system_message.md │ │ │ │ │ ├── 
system_prompt_with_execute.md │ │ │ │ │ ├── system_prompt_with_memory_and_skills.md │ │ │ │ │ ├── system_prompt_with_sync_and_async_subagents.md │ │ │ │ │ └── system_prompt_without_execute.md │ │ │ │ └── test_system_prompt.py │ │ │ ├── test_async_subagents.py │ │ │ ├── test_benchmark_create_deep_agent.py │ │ │ ├── test_end_to_end.py │ │ │ ├── test_file_system_tools.py │ │ │ ├── test_file_system_tools_async.py │ │ │ ├── test_local_sandbox_operations.py │ │ │ ├── test_local_shell.py │ │ │ ├── test_middleware.py │ │ │ ├── test_middleware_async.py │ │ │ ├── test_models.py │ │ │ ├── test_subagents.py │ │ │ ├── test_timing.py │ │ │ ├── test_todo_middleware.py │ │ │ └── test_version.py │ │ └── utils.py │ ├── evals/ │ │ ├── Makefile │ │ ├── README.md │ │ ├── deepagents_evals/ │ │ │ ├── __init__.py │ │ │ ├── categories.json │ │ │ └── radar.py │ │ ├── deepagents_harbor/ │ │ │ ├── __init__.py │ │ │ ├── backend.py │ │ │ ├── deepagents_wrapper.py │ │ │ ├── failure.py │ │ │ ├── langsmith.py │ │ │ ├── metadata.py │ │ │ └── stats.py │ │ ├── pyproject.toml │ │ ├── scripts/ │ │ │ ├── analyze.py │ │ │ ├── generate_radar.py │ │ │ └── harbor_langsmith.py │ │ └── tests/ │ │ ├── __init__.py │ │ ├── evals/ │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── conftest.py │ │ │ ├── data/ │ │ │ │ ├── benchmark_samples/ │ │ │ │ │ ├── bfcl_v3_final.json │ │ │ │ │ ├── frames_final.json │ │ │ │ │ └── nexus_final.json │ │ │ │ └── bfcl_apis/ │ │ │ │ ├── __init__.py │ │ │ │ ├── long_context.py │ │ │ │ ├── message_api.py │ │ │ │ ├── ticket_api.py │ │ │ │ ├── trading_bot.py │ │ │ │ ├── travel_booking.py │ │ │ │ └── vehicle_control.py │ │ │ ├── external_benchmarks.py │ │ │ ├── fixtures/ │ │ │ │ └── summarization_seed_messages.json │ │ │ ├── llm_judge.py │ │ │ ├── memory_agent_bench/ │ │ │ │ ├── __init__.py │ │ │ │ ├── configs.py │ │ │ │ ├── data_utils.py │ │ │ │ ├── eval_utils.py │ │ │ │ └── test_memory_agent_bench.py │ │ │ ├── pytest_reporter.py │ │ │ ├── tau2_airline/ │ │ │ │ ├── LICENSE │ │ │ │ ├── 
__init__.py │ │ │ │ ├── data/ │ │ │ │ │ ├── db.json │ │ │ │ │ ├── policy.md │ │ │ │ │ └── tasks.json │ │ │ │ ├── domain.py │ │ │ │ ├── evaluation.py │ │ │ │ ├── runner.py │ │ │ │ ├── test_tau2_airline.py │ │ │ │ └── user_sim.py │ │ │ ├── test__reporter_sample.py │ │ │ ├── test_external_benchmarks.py │ │ │ ├── test_file_operations.py │ │ │ ├── test_followup_quality.py │ │ │ ├── test_hitl.py │ │ │ ├── test_memory.py │ │ │ ├── test_memory_multiturn.py │ │ │ ├── test_skills.py │ │ │ ├── test_subagents.py │ │ │ ├── test_summarization.py │ │ │ ├── test_system_prompt.py │ │ │ ├── test_tool_selection.py │ │ │ ├── test_tool_usage_relational.py │ │ │ └── utils.py │ │ └── unit_tests/ │ │ ├── __init__.py │ │ ├── test_category_tagging.py │ │ ├── test_external_benchmark_helpers.py │ │ ├── test_imports.py │ │ ├── test_infra.py │ │ └── test_radar.py │ └── partners/ │ ├── daytona/ │ │ ├── LICENSE │ │ ├── Makefile │ │ ├── README.md │ │ ├── langchain_daytona/ │ │ │ ├── __init__.py │ │ │ └── sandbox.py │ │ ├── pyproject.toml │ │ └── tests/ │ │ ├── __init__.py │ │ ├── integration_tests/ │ │ │ ├── __init__.py │ │ │ └── test_integration.py │ │ ├── test_import.py │ │ └── unit_tests/ │ │ ├── __init__.py │ │ └── test_import.py │ ├── modal/ │ │ ├── LICENSE │ │ ├── Makefile │ │ ├── README.md │ │ ├── langchain_modal/ │ │ │ ├── __init__.py │ │ │ └── sandbox.py │ │ ├── pyproject.toml │ │ └── tests/ │ │ ├── __init__.py │ │ ├── integration_tests/ │ │ │ ├── __init__.py │ │ │ └── test_integration.py │ │ ├── test_import.py │ │ └── unit_tests/ │ │ ├── __init__.py │ │ └── test_import.py │ ├── quickjs/ │ │ ├── LICENSE │ │ ├── Makefile │ │ ├── README.md │ │ ├── langchain_quickjs/ │ │ │ ├── __init__.py │ │ │ ├── _foreign_function_docs.py │ │ │ ├── _foreign_functions.py │ │ │ └── middleware.py │ │ ├── pyproject.toml │ │ └── tests/ │ │ ├── __init__.py │ │ └── unit_tests/ │ │ ├── __init__.py │ │ ├── chat_model.py │ │ ├── smoke_tests/ │ │ │ ├── __init__.py │ │ │ ├── conftest.py │ │ │ ├── snapshots/ │ │ │ │ 
├── quickjs_system_prompt_mixed_foreign_functions.md │ │ │ │ └── quickjs_system_prompt_no_tools.md │ │ │ └── test_system_prompt.py │ │ ├── test_end_to_end.py │ │ ├── test_end_to_end_async.py │ │ ├── test_foreign_function_docs.py │ │ ├── test_import.py │ │ └── test_system_prompt.py │ └── runloop/ │ ├── LICENSE │ ├── Makefile │ ├── README.md │ ├── langchain_runloop/ │ │ ├── __init__.py │ │ └── sandbox.py │ ├── pyproject.toml │ └── tests/ │ ├── __init__.py │ ├── integration_tests/ │ │ ├── __init__.py │ │ └── test_integration.py │ ├── test_import.py │ └── unit_tests/ │ ├── __init__.py │ └── test_import.py └── release-please-config.json
Showing preview only (701K chars total). Download the full file or copy to clipboard to get everything.
SYMBOL INDEX (6736 symbols across 295 files)
FILE: .github/scripts/aggregate_evals.py
function _format_table (line 12) | def _format_table(rows: list[dict[str, object]], headers: list[str]) -> ...
function _load_category_labels (line 50) | def _load_category_labels() -> dict[str, str]:
function _build_category_table (line 63) | def _build_category_table(rows: list[dict[str, object]]) -> list[str]:
function main (line 97) | def main() -> None:
FILE: .github/scripts/check_extras_sync.py
function _normalize (line 19) | def _normalize(name: str) -> str:
function _parse_dep (line 28) | def _parse_dep(dep: str) -> tuple[str, str]:
function main (line 46) | def main(pyproject_path: Path) -> int:
FILE: .github/scripts/check_version_equality.py
function _get_pyproject_version (line 21) | def _get_pyproject_version(path: Path) -> str:
function _get_version_py (line 39) | def _get_version_py(path: Path) -> str:
function main (line 59) | def main() -> int:
FILE: .github/scripts/models.py
class Model (line 30) | class Model(NamedTuple):
function _filter_by_tag (line 181) | def _filter_by_tag(prefix: str, tag: str | None) -> list[str]:
function _resolve_models (line 188) | def _resolve_models(workflow: str, selection: str) -> list[str]:
function main (line 222) | def main() -> None:
FILE: .github/scripts/pr-labeler.js
function loadConfig (line 9) | function loadConfig() {
function init (line 35) | function init(github, owner, repo, config, core) {
function loadAndInit (line 273) | function loadAndInit(github, owner, repo, core) {
FILE: examples/content-builder-agent/content_writer.py
function web_search (line 45) | def web_search(
function generate_cover (line 74) | def generate_cover(prompt: str, slug: str) -> str:
function generate_social_image (line 104) | def generate_social_image(prompt: str, platform: str, slug: str) -> str:
function load_subagents (line 134) | def load_subagents(config_path: Path) -> list:
function create_content_writer (line 166) | def create_content_writer():
class AgentDisplay (line 177) | class AgentDisplay:
method __init__ (line 180) | def __init__(self):
method update_status (line 185) | def update_status(self, status: str):
method print_message (line 189) | def print_message(self, msg):
function main (line 239) | async def main():
FILE: examples/deep_research/research_agent/tools.py
function fetch_webpage_content (line 16) | def fetch_webpage_content(url: str, timeout: float = 10.0) -> str:
function tavily_search (line 39) | def tavily_search(
function think_tool (line 92) | def think_tool(reflection: str) -> str:
FILE: examples/deep_research/utils.py
function format_message_content (line 12) | def format_message_content(message):
function format_messages (line 47) | def format_messages(messages):
function format_message (line 63) | def format_message(messages):
function show_prompt (line 68) | def show_prompt(prompt_text: str, title: str = "Prompt", border_style: s...
FILE: examples/nvidia_deep_agent/src/agent.py
class Context (line 32) | class Context(TypedDict, total=False):
FILE: examples/nvidia_deep_agent/src/backend.py
function _seed_sandbox (line 35) | def _seed_sandbox(backend: ModalSandbox) -> None:
function create_backend (line 67) | def create_backend(runtime):
FILE: examples/nvidia_deep_agent/src/tools.py
function fetch_webpage_content (line 16) | def fetch_webpage_content(url: str, timeout: float = 10.0) -> str:
function tavily_search (line 39) | def tavily_search(
FILE: examples/ralph_mode/ralph_mode.py
function ralph (line 46) | async def ralph(
function main (line 156) | def main() -> None:
FILE: examples/text-to-sql-agent/agent.py
function create_sql_deep_agent (line 20) | def create_sql_deep_agent():
function main (line 52) | def main():
FILE: libs/acp/deepagents_acp/__main__.py
function main (line 8) | def main() -> None:
FILE: libs/acp/deepagents_acp/server.py
class AgentSessionContext (line 74) | class AgentSessionContext:
class AgentServerACP (line 81) | class AgentServerACP(ACPAgent):
method __init__ (line 86) | def __init__(
method on_connect (line 115) | def on_connect(self, conn: Client) -> None:
method initialize (line 119) | async def initialize(
method new_session (line 136) | async def new_session(
method set_session_mode (line 158) | async def set_session_mode(
method cancel (line 175) | async def cancel(self, session_id: str, **kwargs: Any) -> None: # noq...
method _log_text (line 179) | async def _log_text(self, session_id: str, text: str) -> None:
method _all_tasks_completed (line 184) | def _all_tasks_completed(self, plan: list[dict[str, Any]]) -> bool:
method _clear_plan (line 198) | async def _clear_plan(self, session_id: str) -> None:
method _handle_todo_update (line 216) | async def _handle_todo_update(
method _process_tool_call_chunks (line 269) | async def _process_tool_call_chunks(
method _create_tool_call_start (line 337) | def _create_tool_call_start(
method _reset_agent (line 420) | def _reset_agent(self, session_id: str) -> None:
method prompt (line 432) | async def prompt( # noqa: C901, PLR0912, PLR0915 # Complex streaming...
method _handle_interrupts (line 625) | async def _handle_interrupts( # noqa: C901, PLR0912, PLR0915 # Compl...
function _serve_test_agent (line 801) | async def _serve_test_agent() -> None:
FILE: libs/acp/deepagents_acp/utils.py
function convert_text_block_to_content_blocks (line 18) | def convert_text_block_to_content_blocks(block: TextContentBlock) -> lis...
function convert_image_block_to_content_blocks (line 23) | def convert_image_block_to_content_blocks(block: ImageContentBlock) -> l...
function convert_audio_block_to_content_blocks (line 34) | def convert_audio_block_to_content_blocks(block: AudioContentBlock) -> l...
function convert_resource_block_to_content_blocks (line 44) | def convert_resource_block_to_content_blocks(
function convert_embedded_resource_block_to_content_blocks (line 73) | def convert_embedded_resource_block_to_content_blocks(
function extract_command_types (line 101) | def extract_command_types(command: str) -> list[str]: # noqa: C901, PLR...
function truncate_execute_command_for_display (line 278) | def truncate_execute_command_for_display(command: str) -> str:
function format_execute_result (line 285) | def format_execute_result(command: str, result: str) -> str:
FILE: libs/acp/examples/demo_agent.py
function _get_interrupt_config (line 24) | def _get_interrupt_config(mode_id: str) -> dict:
function _serve_example_agent (line 42) | async def _serve_example_agent() -> None:
function main (line 107) | def main() -> None:
FILE: libs/acp/examples/local_context.py
class _ExecutableBackend (line 39) | class _ExecutableBackend(Protocol):
method execute (line 42) | def execute(self, command: str) -> ExecuteResponse: ...
function _section_header (line 60) | def _section_header() -> str:
function _section_project (line 80) | def _section_project() -> str:
function _section_package_managers (line 120) | def _section_package_managers() -> str:
function _section_runtimes (line 150) | def _section_runtimes() -> str:
function _section_git (line 170) | def _section_git() -> str:
function _section_test_command (line 202) | def _section_test_command() -> str:
function _section_files (line 224) | def _section_files() -> str:
function _section_tree (line 254) | def _section_tree() -> str:
function _section_makefile (line 277) | def _section_makefile() -> str:
function build_detect_script (line 300) | def build_detect_script() -> str:
class LocalContextState (line 328) | class LocalContextState(AgentState):
class LocalContextMiddleware (line 350) | class LocalContextMiddleware(AgentMiddleware):
method __init__ (line 363) | def __init__(self, backend: _ExecutableBackend) -> None:
method _run_detect_script (line 371) | def _run_detect_script(self) -> str | None:
method before_agent (line 404) | def before_agent( # type: ignore[override]
method _get_modified_request (line 460) | def _get_modified_request(request: ModelRequest) -> ModelRequest | None:
method wrap_model_call (line 479) | def wrap_model_call(
method awrap_model_call (line 496) | async def awrap_model_call(
FILE: libs/acp/tests/chat_model.py
class GenericFakeChatModel (line 17) | class GenericFakeChatModel(BaseChatModel):
method _generate (line 64) | def _generate(
method _stream (line 76) | def _stream(
method _llm_type (line 199) | def _llm_type(self) -> str:
method bind_tools (line 202) | def bind_tools(
FILE: libs/acp/tests/test_agent.py
class FakeACPClient (line 37) | class FakeACPClient(Client):
method __init__ (line 38) | def __init__(
method session_update (line 46) | async def session_update(self, session_id: str, update: Any, source: s...
method request_permission (line 56) | async def request_permission(
function test_acp_agent_prompt_streams_text (line 79) | async def test_acp_agent_prompt_streams_text() -> None:
function test_acp_agent_cancel_stops_prompt (line 105) | async def test_acp_agent_cancel_stops_prompt() -> None:
function test_acp_agent_prompt_streams_list_content_blocks (line 127) | async def test_acp_agent_prompt_streams_list_content_blocks() -> None:
function test_acp_agent_initialize_and_modes (line 176) | async def test_acp_agent_initialize_and_modes() -> None:
function write_file_tool (line 193) | def write_file_tool(file_path: str, content: str) -> str:
function test_acp_agent_hitl_requests_permission_via_public_api (line 197) | async def test_acp_agent_hitl_requests_permission_via_public_api() -> None:
function test_acp_deep_agent_hitl_interrupt_on_edit_file_requests_permission (line 240) | async def test_acp_deep_agent_hitl_interrupt_on_edit_file_requests_permi...
function test_acp_agent_tool_call_chunk_starts_tool_call (line 286) | async def test_acp_agent_tool_call_chunk_starts_tool_call() -> None:
function test_acp_agent_tool_result_completes_tool_call (line 323) | async def test_acp_agent_tool_result_completes_tool_call() -> None:
function test_acp_agent_multimodal_prompt_blocks_do_not_error (line 378) | async def test_acp_agent_multimodal_prompt_blocks_do_not_error() -> None:
function test_acp_agent_end_to_end_clears_plan (line 412) | async def test_acp_agent_end_to_end_clears_plan() -> None:
function test_acp_agent_hitl_approve_always_execute_auto_approves_next_time (line 469) | async def test_acp_agent_hitl_approve_always_execute_auto_approves_next_...
function test_acp_agent_hitl_approve_always_tool_auto_approves_next_time (line 531) | async def test_acp_agent_hitl_approve_always_tool_auto_approves_next_tim...
function test_acp_agent_hitl_client_cancel_raises_request_error (line 584) | async def test_acp_agent_hitl_client_cancel_raises_request_error() -> None:
function test_acp_agent_nested_agent_tool_call_returns_final_text (line 632) | async def test_acp_agent_nested_agent_tool_call_returns_final_text() -> ...
function test_acp_agent_with_prebuilt_langchain_agent_end_to_end (line 681) | async def test_acp_agent_with_prebuilt_langchain_agent_end_to_end() -> N...
function test_acp_langchain_create_agent_nested_agent_tool_call_messages (line 704) | async def test_acp_langchain_create_agent_nested_agent_tool_call_message...
function test_set_session_mode_resets_agent_with_new_mode (line 770) | async def test_set_session_mode_resets_agent_with_new_mode() -> None:
function test_reset_agent_with_compiled_state_graph (line 833) | async def test_reset_agent_with_compiled_state_graph() -> None:
function test_reset_agent_preserves_session_cwd (line 855) | async def test_reset_agent_preserves_session_cwd() -> None:
function test_acp_agent_hitl_requests_permission_only_once (line 895) | async def test_acp_agent_hitl_requests_permission_only_once() -> None:
FILE: libs/acp/tests/test_command_allowlist.py
class TestExtractCommandTypes (line 12) | class TestExtractCommandTypes:
method test_simple_non_sensitive_command (line 15) | def test_simple_non_sensitive_command(self):
method test_npm_commands_with_subcommands (line 21) | def test_npm_commands_with_subcommands(self):
method test_python_with_module_flag (line 28) | def test_python_with_module_flag(self):
method test_python_with_code_flag (line 34) | def test_python_with_code_flag(self):
method test_python_script_execution (line 43) | def test_python_script_execution(self):
method test_node_commands (line 48) | def test_node_commands(self):
method test_npx_with_package (line 58) | def test_npx_with_package(self):
method test_yarn_commands (line 63) | def test_yarn_commands(self):
method test_uv_commands (line 69) | def test_uv_commands(self):
method test_command_with_and_operator (line 77) | def test_command_with_and_operator(self):
method test_command_with_pipes_and_and_operator (line 90) | def test_command_with_pipes_and_and_operator(self):
method test_empty_command (line 96) | def test_empty_command(self):
method test_command_with_trailing_and_operator (line 101) | def test_command_with_trailing_and_operator(self):
method test_duplicate_commands_preserved (line 106) | def test_duplicate_commands_preserved(self):
method test_complex_real_world_command (line 114) | def test_complex_real_world_command(self):
method test_security_python_different_modules (line 119) | def test_security_python_different_modules(self):
method test_security_npm_different_subcommands (line 127) | def test_security_npm_different_subcommands(self):
class TestCommandTypeAllowlist (line 135) | class TestCommandTypeAllowlist:
method test_allowed_command_types_initialized (line 138) | def test_allowed_command_types_initialized(self):
method test_can_add_allowed_command_type (line 147) | def test_can_add_allowed_command_type(self):
method test_command_types_are_session_specific (line 170) | def test_command_types_are_session_specific(self):
method test_multiple_command_types_in_single_command (line 193) | def test_multiple_command_types_in_single_command(self):
method test_security_python_pytest_vs_pip (line 226) | def test_security_python_pytest_vs_pip(self):
method test_security_npm_install_vs_run (line 259) | def test_security_npm_install_vs_run(self):
method test_security_uv_run_pytest_vs_python (line 284) | def test_security_uv_run_pytest_vs_python(self):
FILE: libs/acp/tests/test_main.py
function test_import_main_module (line 4) | def test_import_main_module() -> None:
FILE: libs/acp/tests/test_utils.py
function test_convert_text_block_to_content_blocks (line 19) | def test_convert_text_block_to_content_blocks() -> None:
function test_convert_image_block_to_content_blocks_with_data (line 24) | def test_convert_image_block_to_content_blocks_with_data() -> None:
function test_convert_image_block_to_content_blocks_without_data_falls_back_to_text (line 31) | def test_convert_image_block_to_content_blocks_without_data_falls_back_t...
function test_convert_resource_block_to_content_blocks_truncates_root_dir (line 38) | def test_convert_resource_block_to_content_blocks_truncates_root_dir() -...
function test_convert_embedded_resource_block_to_content_blocks_text (line 50) | def test_convert_embedded_resource_block_to_content_blocks_text() -> None:
FILE: libs/cli/deepagents_cli/__init__.py
function __getattr__ (line 18) | def __getattr__(name: str) -> Callable[[], None]:
FILE: libs/cli/deepagents_cli/_ask_user_types.py
class Choice (line 16) | class Choice(TypedDict):
class Question (line 22) | class Question(TypedDict):
class AskUserRequest (line 59) | class AskUserRequest(TypedDict):
class AskUserAnswered (line 69) | class AskUserAnswered(TypedDict):
class AskUserCancelled (line 79) | class AskUserCancelled(TypedDict):
FILE: libs/cli/deepagents_cli/_cli_context.py
class CLIContext (line 15) | class CLIContext(TypedDict, total=False):
FILE: libs/cli/deepagents_cli/_debug.py
function configure_debug_logging (line 16) | def configure_debug_logging(target: logging.Logger) -> None:
FILE: libs/cli/deepagents_cli/_server_config.py
function _read_env_bool (line 30) | def _read_env_bool(suffix: str, *, default: bool = False) -> bool:
function _read_env_json (line 49) | def _read_env_json(suffix: str) -> Any: # noqa: ANN401
function _read_env_str (line 74) | def _read_env_str(suffix: str) -> str | None:
function _read_env_optional_bool (line 86) | def _read_env_optional_bool(suffix: str) -> bool | None:
class ServerConfig (line 105) | class ServerConfig:
method __post_init__ (line 132) | def __post_init__(self) -> None:
method to_env (line 141) | def to_env(self) -> dict[str, str | None]:
method from_env (line 180) | def from_env(cls) -> ServerConfig:
method from_cli_args (line 215) | def from_cli_args(
function _normalize_path (line 288) | def _normalize_path(
FILE: libs/cli/deepagents_cli/_session_stats.py
class ModelStats (line 19) | class ModelStats:
class SessionStats (line 34) | class SessionStats:
method record_request (line 56) | def record_request(
method merge (line 82) | def merge(self, other: SessionStats) -> None:
function format_token_count (line 101) | def format_token_count(count: int) -> str:
FILE: libs/cli/deepagents_cli/_testing_models.py
class DeterministicIntegrationChatModel (line 21) | class DeterministicIntegrationChatModel(GenericFakeChatModel):
method bind_tools (line 65) | def bind_tools(
method _generate (line 75) | def _generate(
method _llm_type (line 106) | def _llm_type(self) -> str:
method _stringify_message (line 111) | def _stringify_message(message: BaseMessage) -> str:
method _looks_like_summary_request (line 133) | def _looks_like_summary_request(prompt: str) -> bool:
FILE: libs/cli/deepagents_cli/agent.py
function load_async_subagents (line 68) | def load_async_subagents(config_path: Path | None = None) -> list[AsyncS...
function list_agents (line 136) | def list_agents(*, output_format: OutputFormat = "text") -> None:
function reset_agent (line 211) | def reset_agent(
function build_model_identity_section (line 280) | def build_model_identity_section(
function get_system_prompt (line 308) | def get_system_prompt(
function _format_write_file_description (line 443) | def _format_write_file_description(
function _format_edit_file_description (line 461) | def _format_edit_file_description(
function _format_web_search_description (line 477) | def _format_web_search_description(
function _format_fetch_url_description (line 495) | def _format_fetch_url_description(
function _format_task_description (line 529) | def _format_task_description(
function _format_execute_description (line 562) | def _format_execute_description(
function _add_interrupt_on (line 593) | def _add_interrupt_on() -> dict[str, InterruptOnConfig]:
function create_cli_agent (line 665) | def create_cli_agent(
FILE: libs/cli/deepagents_cli/app.py
function _write_iterm_escape (line 113) | def _write_iterm_escape(sequence: str) -> None:
function _restore_cursor_guide (line 138) | def _restore_cursor_guide() -> None:
function _extract_model_params_flag (line 149) | def _extract_model_params_flag(raw_arg: str) -> tuple[str, dict[str, Any...
class QueuedMessage (line 258) | class QueuedMessage:
class DeferredAction (line 273) | class DeferredAction:
class TextualTokenTracker (line 283) | class TextualTokenTracker:
method __init__ (line 286) | def __init__(
method add (line 296) | def add(self, total_tokens: int, _output_tokens: int = 0) -> None:
method reset (line 306) | def reset(self) -> None:
method hide (line 311) | def hide(self) -> None:
method show (line 316) | def show(self) -> None:
function _new_thread_id (line 321) | def _new_thread_id() -> str:
class TextualSessionState (line 332) | class TextualSessionState:
method __init__ (line 335) | def __init__(
method reset_thread (line 350) | def reset_thread(self) -> str:
class DeepAgentsApp (line 368) | class DeepAgentsApp(App):
class ServerReady (line 424) | class ServerReady(Message):
method __init__ (line 427) | def __init__( # noqa: D107
class ServerStartFailed (line 438) | class ServerStartFailed(Message):
method __init__ (line 441) | def __init__(self, error: Exception) -> None: # noqa: D107
method __init__ (line 445) | def __init__(
method _remote_agent (line 572) | def _remote_agent(self) -> RemoteAgent | None:
method compose (line 592) | def compose(self) -> ComposeResult:
method on_mount (line 620) | async def on_mount(self) -> None:
method _resolve_git_branch_and_continue (line 656) | async def _resolve_git_branch_and_continue(self) -> None:
method _post_paint_init (line 696) | async def _post_paint_init(self) -> None:
method _init_session_state (line 768) | async def _init_session_state(self) -> None:
method _check_optional_tools_background (line 787) | async def _check_optional_tools_background(self) -> None:
method _init_agent_adapter (line 817) | def _init_agent_adapter(self) -> None:
method _resolve_resume_thread (line 845) | async def _resolve_resume_thread(self) -> None:
method _start_server_background (line 922) | async def _start_server_background(self) -> None:
method on_deep_agents_app_server_ready (line 1000) | def on_deep_agents_app_server_ready(self, event: ServerReady) -> None:
method on_deep_agents_app_server_start_failed (line 1058) | def on_deep_agents_app_server_start_failed(self, event: ServerStartFai...
method _prewarm_deferred_imports (line 1083) | def _prewarm_deferred_imports() -> None:
method _prewarm_threads_cache (line 1128) | async def _prewarm_threads_cache(self) -> None: # noqa: PLR6301 # Wo...
method _prewarm_model_caches (line 1137) | async def _prewarm_model_caches(self) -> None:
method _check_for_updates (line 1152) | async def _check_for_updates(self) -> None:
method _show_whats_new (line 1213) | async def _show_whats_new(self) -> None:
method _handle_update_command (line 1244) | async def _handle_update_command(self) -> None:
method on_scroll_up (line 1288) | def on_scroll_up(self, _event: ScrollUp) -> None:
method _update_status (line 1292) | def _update_status(self, message: str) -> None:
method _update_tokens (line 1297) | def _update_tokens(self, count: int) -> None:
method _hide_tokens (line 1302) | def _hide_tokens(self) -> None:
method _check_hydration_needed (line 1307) | def _check_hydration_needed(self) -> None:
method _hydrate_messages_above (line 1327) | async def _hydrate_messages_above(self) -> None:
method _mount_before_queued (line 1403) | async def _mount_before_queued(self, container: Container, widget: Wid...
method _is_spinner_at_correct_position (line 1430) | def _is_spinner_at_correct_position(self, container: Container) -> bool:
method _set_spinner (line 1456) | async def _set_spinner(self, status: SpinnerStatus) -> None:
method _request_approval (line 1485) | async def _request_approval(
method _mount_approval_widget (line 1596) | async def _mount_approval_widget(
method _deferred_show_approval (line 1624) | async def _deferred_show_approval(
method _on_auto_approve_enabled (line 1673) | def _on_auto_approve_enabled(self) -> None:
method _remove_ask_user_widget (line 1687) | async def _remove_ask_user_widget( # noqa: PLR6301 # Shared helper u...
method _request_ask_user (line 1708) | async def _request_ask_user(
method on_ask_user_menu_answered (line 1768) | async def on_ask_user_menu_answered(
method on_ask_user_menu_cancelled (line 1781) | async def on_ask_user_menu_cancelled(
method _process_message (line 1794) | async def _process_message(self, value: str, mode: InputMode) -> None:
method _can_bypass_queue (line 1811) | def _can_bypass_queue(self, value: str) -> bool:
method on_chat_input_submitted (line 1835) | async def on_chat_input_submitted(self, event: ChatInput.Submitted) ->...
method on_chat_input_mode_changed (line 1878) | def on_chat_input_mode_changed(self, event: ChatInput.ModeChanged) -> ...
method on_chat_input_typing (line 1883) | def on_chat_input_typing(
method _is_user_typing (line 1890) | def _is_user_typing(self) -> bool:
method on_approval_menu_decided (line 1901) | async def on_approval_menu_decided(
method _handle_shell_command (line 1928) | async def _handle_shell_command(self, command: str) -> None:
method _run_shell_task (line 1948) | async def _run_shell_task(self, command: str) -> None:
method _cleanup_shell_task (line 2009) | async def _cleanup_shell_task(self) -> None:
method _kill_shell_process (line 2034) | async def _kill_shell_process(self) -> None:
method _open_url_command (line 2076) | async def _open_url_command(self, command: str, cmd: str) -> None:
method _build_thread_message (line 2116) | async def _build_thread_message(prefix: str, thread_id: str) -> str | ...
method _handle_trace_command (line 2147) | async def _handle_trace_command(self, command: str) -> None:
method _handle_command (line 2187) | async def _handle_command(self, command: str) -> None:
method _get_conversation_token_count (line 2428) | async def _get_conversation_token_count(self) -> int | None:
method _resolve_offload_budget_str (line 2455) | def _resolve_offload_budget_str(self) -> str | None:
method _handle_offload (line 2488) | async def _handle_offload(self) -> None:
method _handle_user_message (line 2617) | async def _handle_user_message(self, message: str) -> None:
method _run_agent_task (line 2648) | async def _run_agent_task(self, message: str) -> None:
method _process_next_from_queue (line 2694) | async def _process_next_from_queue(self) -> None:
method _cleanup_agent_task (line 2728) | async def _cleanup_agent_task(self) -> None:
method _convert_messages_to_data (line 2759) | def _convert_messages_to_data(messages: list[Any]) -> list[MessageData]:
method _get_thread_state_values (line 2857) | async def _get_thread_state_values(self, thread_id: str) -> dict[str, ...
method _fetch_thread_history_data (line 2903) | async def _fetch_thread_history_data(self, thread_id: str) -> list[Mes...
method _read_channel_values_from_checkpointer (line 2934) | async def _read_channel_values_from_checkpointer(thread_id: str) -> di...
method _upgrade_thread_message_link (line 2971) | async def _upgrade_thread_message_link(
method _schedule_thread_message_link (line 3009) | def _schedule_thread_message_link(
method _load_thread_history (line 3032) | async def _load_thread_history(
method _mount_message (line 3131) | async def _mount_message(
method _prune_old_messages (line 3182) | async def _prune_old_messages(self) -> None:
method _set_active_message (line 3218) | def _set_active_message(self, message_id: str | None) -> None:
method _sync_message_content (line 3226) | def _sync_message_content(self, message_id: str, content: str) -> None:
method _clear_messages (line 3242) | async def _clear_messages(self) -> None:
method _pop_last_queued_message (line 3255) | def _pop_last_queued_message(self) -> None:
method _discard_queue (line 3292) | def _discard_queue(self) -> None:
method _defer_action (line 3300) | def _defer_action(self, action: DeferredAction) -> None:
method _maybe_drain_deferred (line 3314) | async def _maybe_drain_deferred(self) -> None:
method _drain_deferred_actions (line 3319) | async def _drain_deferred_actions(self) -> None:
method _cancel_worker (line 3340) | def _cancel_worker(self, worker: Worker[None] | None) -> None:
method action_quit_or_interrupt (line 3350) | def action_quit_or_interrupt(self) -> None:
method _arm_quit_pending (line 3394) | def _arm_quit_pending(self, shortcut: str) -> None:
method action_interrupt (line 3405) | def action_interrupt(self) -> None:
method action_quit_app (line 3470) | def action_quit_app(self) -> None:
method exit (line 3488) | def exit(
method action_toggle_auto_approve (line 3532) | def action_toggle_auto_approve(self) -> None:
method action_toggle_tool_output (line 3558) | def action_toggle_tool_output(self) -> None:
method action_approval_up (line 3573) | def action_approval_up(self) -> None:
method action_approval_down (line 3580) | def action_approval_down(self) -> None:
method action_approval_select (line 3585) | def action_approval_select(self) -> None:
method _is_input_focused (line 3591) | def _is_input_focused(self) -> bool:
method action_approval_yes (line 3605) | def action_approval_yes(self) -> None:
method action_approval_auto (line 3610) | def action_approval_auto(self) -> None:
method action_approval_no (line 3615) | def action_approval_no(self) -> None:
method action_approval_escape (line 3620) | def action_approval_escape(self) -> None:
method action_open_editor (line 3625) | async def action_open_editor(self) -> None:
method on_paste (line 3655) | def on_paste(self, event: Paste) -> None:
method on_app_focus (line 3669) | def on_app_focus(self) -> None:
method on_click (line 3685) | def on_click(self, _event: Click) -> None:
method on_mouse_up (line 3694) | def on_mouse_up(self, event: MouseUp) -> None: # noqa: ARG002 # Text...
method _show_model_selector (line 3704) | async def _show_model_selector(
method _show_mcp_viewer (line 3756) | async def _show_mcp_viewer(self) -> None:
method _show_thread_selector (line 3768) | async def _show_thread_selector(self) -> None:
method _update_welcome_banner (line 3805) | def _update_welcome_banner(
method _resume_thread (line 3828) | async def _resume_thread(self, thread_id: str) -> None:
method _switch_model (line 3939) | async def _switch_model(
method _set_default_model (line 4073) | async def _set_default_model(self, model_spec: str) -> None:
method _clear_default_model (line 4102) | async def _clear_default_model(self) -> None:
class AppResult (line 4127) | class AppResult:
function run_textual_app (line 4144) | async def run_textual_app(
FILE: libs/cli/deepagents_cli/ask_user.py
function _validate_questions (line 63) | def _validate_questions(questions: list[Question]) -> None:
function _parse_answers (line 100) | def _parse_answers(
class AskUserMiddleware (line 205) | class AskUserMiddleware(AgentMiddleware[Any, ContextT, ResponseT]):
method __init__ (line 213) | def __init__(
method wrap_model_call (line 257) | def wrap_model_call(
method awrap_model_call (line 279) | async def awrap_model_call(
FILE: libs/cli/deepagents_cli/built_in_skills/skill-creator/scripts/init_skill.py
function _validate_name (line 192) | def _validate_name(name: str) -> tuple[bool, str]:
function title_case_skill_name (line 225) | def title_case_skill_name(skill_name):
function init_skill (line 234) | def init_skill(skill_name, path):
function main (line 322) | def main():
FILE: libs/cli/deepagents_cli/built_in_skills/skill-creator/scripts/quick_validate.py
function validate_skill (line 20) | def validate_skill(skill_path):
FILE: libs/cli/deepagents_cli/clipboard.py
function _copy_osc52 (line 21) | def _copy_osc52(text: str) -> None:
function _shorten_preview (line 33) | def _shorten_preview(texts: list[str]) -> str:
function copy_selection_to_clipboard (line 46) | def copy_selection_to_clipboard(app: App) -> None:
FILE: libs/cli/deepagents_cli/command_registry.py
class BypassTier (line 14) | class BypassTier(StrEnum):
class SlashCommand (line 34) | class SlashCommand:
function _build_bypass_set (line 158) | def _build_bypass_set(tier: BypassTier) -> frozenset[str]:
FILE: libs/cli/deepagents_cli/config.py
function _find_dotenv_from_start_path (line 52) | def _find_dotenv_from_start_path(start_path: Path) -> Path | None:
function _load_dotenv (line 73) | def _load_dotenv(*, start_path: Path | None = None, override: bool = Fal...
function _ensure_bootstrap (line 94) | def _ensure_bootstrap() -> None:
class CharsetMode (line 192) | class CharsetMode(StrEnum):
class Glyphs (line 201) | class Glyphs:
function _resolve_editable_info (line 299) | def _resolve_editable_info() -> tuple[bool, str | None]:
function _is_editable_install (line 337) | def _is_editable_install() -> bool:
function _get_editable_install_path (line 348) | def _get_editable_install_path() -> str | None:
function _detect_charset_mode (line 357) | def _detect_charset_mode() -> CharsetMode:
function get_glyphs (line 379) | def get_glyphs() -> Glyphs:
function reset_glyphs_cache (line 394) | def reset_glyphs_cache() -> None:
function is_ascii_mode (line 400) | def is_ascii_mode() -> bool:
function newline_shortcut (line 412) | def newline_shortcut() -> str:
function get_banner (line 460) | def get_banner() -> str:
class _ShellAllowAll (line 496) | class _ShellAllowAll(list): # noqa: FURB189 # sentinel type, not a gen...
function parse_shell_allow_list (line 509) | def parse_shell_allow_list(allow_list_str: str | None) -> list[str] | None:
class Settings (line 570) | class Settings:
method from_environment (line 624) | def from_environment(cls, *, start_path: Path | None = None) -> Settings:
method reload_from_environment (line 676) | def reload_from_environment(self, *, start_path: Path | None = None) -...
method has_openai (line 784) | def has_openai(self) -> bool:
method has_anthropic (line 789) | def has_anthropic(self) -> bool:
method has_google (line 794) | def has_google(self) -> bool:
method has_nvidia (line 799) | def has_nvidia(self) -> bool:
method has_vertex_ai (line 804) | def has_vertex_ai(self) -> bool:
method has_tavily (line 814) | def has_tavily(self) -> bool:
method user_deepagents_dir (line 819) | def user_deepagents_dir(self) -> Path:
method get_user_agent_md_path (line 828) | def get_user_agent_md_path(agent_name: str) -> Path:
method get_project_agent_md_path (line 841) | def get_project_agent_md_path(self) -> list[Path]:
method _is_valid_agent_name (line 863) | def _is_valid_agent_name(agent_name: str) -> bool:
method get_agent_dir (line 874) | def get_agent_dir(self, agent_name: str) -> Path:
method ensure_agent_dir (line 894) | def ensure_agent_dir(self, agent_name: str) -> Path:
method get_user_skills_dir (line 916) | def get_user_skills_dir(self, agent_name: str) -> Path:
method ensure_user_skills_dir (line 927) | def ensure_user_skills_dir(self, agent_name: str) -> Path:
method get_project_skills_dir (line 940) | def get_project_skills_dir(self) -> Path | None:
method ensure_project_skills_dir (line 950) | def ensure_project_skills_dir(self) -> Path | None:
method get_user_agents_dir (line 964) | def get_user_agents_dir(self, agent_name: str) -> Path:
method get_project_agents_dir (line 975) | def get_project_agents_dir(self) -> Path | None:
method user_agents_dir (line 986) | def user_agents_dir(self) -> Path:
method get_user_agent_skills_dir (line 994) | def get_user_agent_skills_dir(self) -> Path:
method get_project_agent_skills_dir (line 1004) | def get_project_agent_skills_dir(self) -> Path | None:
method get_built_in_skills_dir (line 1017) | def get_built_in_skills_dir() -> Path:
class SessionState (line 1026) | class SessionState:
method __init__ (line 1037) | def __init__(self, auto_approve: bool = False, no_splash: bool = False...
method toggle_auto_approve (line 1056) | def toggle_auto_approve(self) -> bool:
function contains_dangerous_patterns (line 1152) | def contains_dangerous_patterns(command: str) -> bool:
function is_shell_command_allowed (line 1180) | def is_shell_command_allowed(command: str, allow_list: list[str] | None)...
function get_langsmith_project_name (line 1248) | def get_langsmith_project_name() -> str | None:
function fetch_langsmith_project_url (line 1277) | def fetch_langsmith_project_url(project_name: str) -> str | None:
function build_langsmith_thread_url (line 1357) | def build_langsmith_thread_url(thread_id: str) -> str | None:
function reset_langsmith_url_cache (line 1381) | def reset_langsmith_url_cache() -> None:
function get_default_coding_instructions (line 1387) | def get_default_coding_instructions() -> str:
function detect_provider (line 1400) | def detect_provider(model_name: str) -> str | None:
function _get_default_model_spec (line 1442) | def _get_default_model_spec() -> str:
function _apply_openrouter_defaults (line 1496) | def _apply_openrouter_defaults(kwargs: dict[str, Any]) -> None:
function _get_provider_kwargs (line 1525) | def _get_provider_kwargs(
function _create_model_from_class (line 1562) | def _create_model_from_class(
function _create_model_via_init (line 1625) | def _create_model_via_init(
class ModelResult (line 1692) | class ModelResult:
method apply_to_settings (line 1710) | def apply_to_settings(self) -> None:
function _apply_profile_overrides (line 1718) | def _apply_profile_overrides(
function create_model (line 1767) | def create_model(
function validate_model_capabilities (line 1893) | def validate_model_capabilities(model: BaseChatModel, model_name: str) -...
function _get_console (line 1946) | def _get_console() -> Console:
function _get_settings (line 1969) | def _get_settings() -> Settings:
function __getattr__ (line 1999) | def __getattr__(name: str) -> Settings | Console:
FILE: libs/cli/deepagents_cli/configurable_model.py
function _is_anthropic_model (line 27) | def _is_anthropic_model(model: object) -> bool:
function _apply_overrides (line 52) | def _apply_overrides(request: ModelRequest) -> ModelRequest:
class ConfigurableModelMiddleware (line 144) | class ConfigurableModelMiddleware(AgentMiddleware):
method wrap_model_call (line 147) | def wrap_model_call( # noqa: PLR6301
method awrap_model_call (line 155) | async def awrap_model_call( # noqa: PLR6301
FILE: libs/cli/deepagents_cli/editor.py
function resolve_editor (line 30) | def resolve_editor() -> list[str] | None:
function _prepare_command (line 48) | def _prepare_command(cmd: list[str], filepath: str) -> list[str]:
function open_in_editor (line 73) | def open_in_editor(current_text: str) -> str | None:
FILE: libs/cli/deepagents_cli/file_ops.py
class ApprovalPreview (line 20) | class ApprovalPreview:
function _safe_read (line 30) | def _safe_read(path: Path) -> str | None:
function _count_lines (line 43) | def _count_lines(text: str) -> int:
function compute_unified_diff (line 54) | def compute_unified_diff(
class FileOpMetrics (line 96) | class FileOpMetrics:
class FileOperationRecord (line 109) | class FileOperationRecord:
function resolve_physical_path (line 127) | def resolve_physical_path(
function format_display_path (line 152) | def format_display_path(path_str: str | None) -> str:
function build_approval_preview (line 169) | def build_approval_preview(
class FileOpTracker (line 273) | class FileOpTracker:
method __init__ (line 276) | def __init__(
method start_operation (line 285) | def start_operation(
method complete_with_message (line 325) | def complete_with_message(self, tool_message: Any) -> FileOperationRec...
method mark_hitl_approved (line 427) | def mark_hitl_approved(self, tool_name: str, args: dict[str, Any]) -> ...
method _populate_after_content (line 440) | def _populate_after_content(self, record: FileOperationRecord) -> None:
method _finalize (line 471) | def _finalize(self, record: FileOperationRecord) -> None:
FILE: libs/cli/deepagents_cli/hooks.py
function _load_hooks (line 35) | def _load_hooks() -> list[dict[str, Any]]:
function _run_single_hook (line 81) | def _run_single_hook(command: list[str], event: str, payload_bytes: byte...
function _dispatch_hook_sync (line 115) | def _dispatch_hook_sync(
function dispatch_hook (line 158) | async def dispatch_hook(event: str, payload: dict[str, Any]) -> None:
function dispatch_hook_fire_and_forget (line 187) | def dispatch_hook_fire_and_forget(event: str, payload: dict[str, Any]) -...
FILE: libs/cli/deepagents_cli/input.py
class ParsedPastedPathPayload (line 92) | class ParsedPastedPathPayload:
class MediaTracker (line 107) | class MediaTracker:
method __init__ (line 110) | def __init__(self) -> None:
method add_media (line 121) | def add_media(self, data: ImageData | VideoData, kind: MediaKind) -> str:
method add_image (line 143) | def add_image(self, image_data: ImageData) -> str:
method add_video (line 154) | def add_video(self, video_data: VideoData) -> str:
method get_media (line 165) | def get_media(self, kind: MediaKind) -> list[ImageData] | list[VideoDa...
method get_images (line 178) | def get_images(self) -> list[ImageData]:
method get_videos (line 186) | def get_videos(self) -> list[VideoData]:
method clear (line 194) | def clear(self) -> None:
method sync_to_text (line 201) | def sync_to_text(self, text: str) -> None:
method _sync_kind_images (line 212) | def _sync_kind_images(self, text: str) -> bool:
method _sync_kind_videos (line 231) | def _sync_kind_videos(self, text: str) -> bool:
method _max_placeholder_id (line 251) | def _max_placeholder_id(
function parse_file_mentions (line 274) | def parse_file_mentions(text: str) -> tuple[str, list[Path]]:
function parse_pasted_file_paths (line 334) | def parse_pasted_file_paths(text: str) -> list[Path]:
function parse_pasted_path_payload (line 383) | def parse_pasted_path_payload(
function parse_single_pasted_file_path (line 420) | def parse_single_pasted_file_path(text: str) -> Path | None:
function extract_leading_pasted_file_path (line 439) | def extract_leading_pasted_file_path(text: str) -> tuple[Path, int] | None:
function normalize_pasted_path (line 473) | def normalize_pasted_path(text: str) -> Path | None:
function _split_paste_line (line 527) | def _split_paste_line(line: str) -> list[str]:
function _token_to_path (line 543) | def _token_to_path(token: str) -> Path | None:
function _leading_token_end (line 581) | def _leading_token_end(text: str) -> int | None:
function _extract_unquoted_leading_path_with_spaces (line 617) | def _extract_unquoted_leading_path_with_spaces(text: str) -> tuple[Path,...
function _normalize_windows_pasted_path (line 650) | def _normalize_windows_pasted_path(text: str) -> Path | None:
function _normalize_posix_pasted_path (line 665) | def _normalize_posix_pasted_path(text: str) -> Path | None:
function _resolve_existing_pasted_path (line 688) | def _resolve_existing_pasted_path(path: Path) -> Path | None:
function _normalize_unicode_spaces (line 720) | def _normalize_unicode_spaces(text: str) -> str:
function _resolve_with_unicode_space_variants (line 732) | def _resolve_with_unicode_space_variants(path: Path) -> Path | None:
FILE: libs/cli/deepagents_cli/integrations/sandbox_factory.py
function _run_sandbox_setup (line 35) | def _run_sandbox_setup(backend: SandboxBackendProtocol, setup_script_pat...
function create_sandbox (line 84) | def create_sandbox(
function _get_available_sandbox_types (line 145) | def _get_available_sandbox_types() -> list[str]:
function get_default_working_dir (line 154) | def get_default_working_dir(provider: str) -> str:
function _import_provider_module (line 178) | def _import_provider_module(
class _LangSmithProvider (line 214) | class _LangSmithProvider(SandboxProvider):
method __init__ (line 220) | def __init__(self, api_key: str | None = None) -> None:
method get_or_create (line 237) | def get_or_create(
method delete (line 312) | def delete(self, *, sandbox_id: str, **kwargs: Any) -> None: # noqa: ...
method _resolve_template (line 322) | def _resolve_template(
method _ensure_template (line 343) | def _ensure_template(
class _DaytonaProvider (line 372) | class _DaytonaProvider(SandboxProvider):
method __init__ (line 375) | def __init__(self) -> None:
method get_or_create (line 393) | def get_or_create(
method delete (line 446) | def delete(self, *, sandbox_id: str, **kwargs: Any) -> None: # noqa: ...
class _ModalProvider (line 452) | class _ModalProvider(SandboxProvider):
method __init__ (line 455) | def __init__(self) -> None:
method get_or_create (line 467) | def get_or_create(
method delete (line 521) | def delete(self, *, sandbox_id: str, **kwargs: Any) -> None: # noqa: ...
class _RunloopProvider (line 527) | class _RunloopProvider(SandboxProvider):
method __init__ (line 530) | def __init__(self) -> None:
method get_or_create (line 543) | def get_or_create(
method delete (line 596) | def delete(self, *, sandbox_id: str, **kwargs: Any) -> None: # noqa: ...
function _get_provider (line 601) | def _get_provider(provider_name: str) -> SandboxProvider:
function verify_sandbox_deps (line 629) | def verify_sandbox_deps(provider: str) -> None:
FILE: libs/cli/deepagents_cli/integrations/sandbox_provider.py
class SandboxError (line 13) | class SandboxError(Exception):
method original_exc (line 17) | def original_exc(self) -> BaseException | None:
class SandboxNotFoundError (line 22) | class SandboxNotFoundError(SandboxError):
class SandboxProvider (line 26) | class SandboxProvider(ABC):
method get_or_create (line 30) | def get_or_create(
method delete (line 40) | def delete(
method aget_or_create (line 49) | async def aget_or_create(
method adelete (line 64) | async def adelete(
FILE: libs/cli/deepagents_cli/local_context.py
function _build_mcp_context (line 43) | def _build_mcp_context(servers: list[MCPServerInfo]) -> str:
class _ExecutableBackend (line 80) | class _ExecutableBackend(Protocol):
method execute (line 83) | def execute(self, command: str) -> ExecuteResponse: ...
function _section_header (line 102) | def _section_header() -> str:
function _section_project (line 122) | def _section_project() -> str:
function _section_package_managers (line 162) | def _section_package_managers() -> str:
function _section_runtimes (line 192) | def _section_runtimes() -> str:
function _section_git (line 212) | def _section_git() -> str:
function _section_test_command (line 244) | def _section_test_command() -> str:
function _section_files (line 266) | def _section_files() -> str:
function _section_tree (line 296) | def _section_tree() -> str:
function _section_makefile (line 319) | def _section_makefile() -> str:
function build_detect_script (line 342) | def build_detect_script() -> str:
class LocalContextState (line 390) | class LocalContextState(AgentState):
class LocalContextMiddleware (line 412) | class LocalContextMiddleware(AgentMiddleware):
method __init__ (line 425) | def __init__(
method _run_detect_script (line 440) | def _run_detect_script(self) -> str | None:
method before_agent (line 476) | def before_agent( # type: ignore[override]
method _get_modified_request (line 531) | def _get_modified_request(self, request: ModelRequest) -> ModelRequest...
method wrap_model_call (line 551) | def wrap_model_call(
method awrap_model_call (line 568) | async def awrap_model_call(
FILE: libs/cli/deepagents_cli/main.py
function check_cli_dependencies (line 41) | def check_cli_dependencies() -> None:
function _ripgrep_install_hint (line 78) | def _ripgrep_install_hint() -> str:
function check_optional_tools (line 117) | def check_optional_tools(*, config_path: Path | None = None) -> list[str]:
function format_tool_warning_tui (line 139) | def format_tool_warning_tui(tool: str) -> str:
function format_tool_warning_cli (line 158) | def format_tool_warning_cli(tool: str) -> str:
function _preload_session_mcp_server_info (line 179) | async def _preload_session_mcp_server_info(
function parse_args (line 231) | def parse_args() -> argparse.Namespace:
function run_textual_cli_async (line 605) | async def run_textual_cli_async(
function _run_acp_cli_async (line 751) | async def _run_acp_cli_async(
function apply_stdin_pipe (line 868) | def apply_stdin_pipe(args: argparse.Namespace) -> None:
function _print_session_stats (line 984) | def _print_session_stats(stats: Any, console: Any) -> None: # noqa: ANN401
function _check_mcp_project_trust (line 998) | def _check_mcp_project_trust(*, trust_flag: bool = False) -> bool | None:
function cli_main (line 1083) | def cli_main() -> None:
FILE: libs/cli/deepagents_cli/mcp_tools.py
class MCPToolInfo (line 29) | class MCPToolInfo:
class MCPServerInfo (line 40) | class MCPServerInfo:
function _resolve_server_type (line 57) | def _resolve_server_type(server_config: dict[str, Any]) -> str:
function _validate_server_config (line 74) | def _validate_server_config(server_name: str, server_config: dict[str, A...
function load_mcp_config (line 127) | def load_mcp_config(config_path: str) -> dict[str, Any]:
function _resolve_project_config_base (line 184) | def _resolve_project_config_base(project_context: ProjectContext | None)...
function discover_mcp_configs (line 201) | def discover_mcp_configs(
function classify_discovered_configs (line 237) | def classify_discovered_configs(
function extract_stdio_server_commands (line 265) | def extract_stdio_server_commands(
function _filter_project_stdio_servers (line 288) | def _filter_project_stdio_servers(config: dict[str, Any]) -> dict[str, A...
function merge_mcp_configs (line 310) | def merge_mcp_configs(configs: list[dict[str, Any]]) -> dict[str, Any]:
function load_mcp_config_lenient (line 330) | def load_mcp_config_lenient(config_path: Path) -> dict[str, Any] | None:
class MCPSessionManager (line 355) | class MCPSessionManager:
method __init__ (line 363) | def __init__(self) -> None:
method cleanup (line 368) | async def cleanup(self) -> None:
function _check_stdio_server (line 373) | def _check_stdio_server(server_name: str, server_config: dict[str, Any])...
function _check_remote_server (line 395) | async def _check_remote_server(server_name: str, server_config: dict[str...
function _load_tools_from_config (line 428) | async def _load_tools_from_config(
function get_mcp_tools (line 548) | async def get_mcp_tools(
function resolve_and_load_mcp_tools (line 576) | async def resolve_and_load_mcp_tools(
FILE: libs/cli/deepagents_cli/mcp_trust.py
function compute_config_fingerprint (line 27) | def compute_config_fingerprint(config_paths: list[Path]) -> str:
function _load_config (line 45) | def _load_config(config_path: Path) -> dict[str, Any]:
function _save_config (line 63) | def _save_config(data: dict[str, Any], config_path: Path) -> bool:
function is_project_mcp_trusted (line 94) | def is_project_mcp_trusted(
function trust_project_mcp (line 118) | def trust_project_mcp(
function revoke_project_mcp_trust (line 146) | def revoke_project_mcp_trust(
FILE: libs/cli/deepagents_cli/media_utils.py
function _get_executable (line 53) | def _get_executable(name: str) -> str | None:
class ImageData (line 66) | class ImageData:
method to_message_content (line 73) | def to_message_content(self) -> dict:
class VideoData (line 86) | class VideoData:
method to_message_content (line 93) | def to_message_content(self) -> "VideoContentBlock":
function get_clipboard_image (line 107) | def get_clipboard_image() -> ImageData | None:
function get_image_from_path (line 126) | def get_image_from_path(path: pathlib.Path) -> ImageData | None:
function _detect_video_format (line 176) | def _detect_video_format(data: bytes) -> str | None:
function get_video_from_path (line 201) | def get_video_from_path(path: pathlib.Path) -> VideoData | None:
function get_media_from_path (line 260) | def get_media_from_path(path: pathlib.Path) -> ImageData | VideoData | N...
function _get_macos_clipboard_image (line 275) | def _get_macos_clipboard_image() -> ImageData | None:
function _get_clipboard_via_osascript (line 324) | def _get_clipboard_via_osascript() -> ImageData | None:
function encode_to_base64 (line 440) | def encode_to_base64(data: bytes) -> str:
function create_multimodal_content (line 452) | def create_multimodal_content(
FILE: libs/cli/deepagents_cli/model_config.py
class ModelConfigError (line 28) | class ModelConfigError(Exception):
class ModelSpec (line 33) | class ModelSpec:
method __post_init__ (line 52) | def __post_init__(self) -> None:
method parse (line 66) | def parse(cls, spec: str) -> ModelSpec:
method try_parse (line 88) | def try_parse(cls, spec: str) -> ModelSpec | None:
method __str__ (line 102) | def __str__(self) -> str:
class ModelProfileEntry (line 107) | class ModelProfileEntry(TypedDict):
class ProviderConfig (line 121) | class ProviderConfig(TypedDict, total=False):
function clear_caches (line 226) | def clear_caches() -> None:
function _get_builtin_providers (line 240) | def _get_builtin_providers() -> dict[str, Any]:
function _get_provider_profile_modules (line 266) | def _get_provider_profile_modules() -> list[tuple[str, str]]:
function _load_provider_profiles (line 292) | def _load_provider_profiles(module_path: str) -> dict[str, Any]:
function _profile_module_from_class_path (line 346) | def _profile_module_from_class_path(class_path: str) -> str | None:
function get_available_models (line 365) | def get_available_models() -> dict[str, list[str]]:
function _build_entry (line 491) | def _build_entry(
function get_model_profiles (line 517) | def get_model_profiles(
function has_provider_credentials (line 652) | def has_provider_credentials(provider: str) -> bool | None:
function get_credential_env_var (line 703) | def get_credential_env_var(provider: str) -> str | None:
class ModelConfig (line 723) | class ModelConfig:
method __post_init__ (line 740) | def __post_init__(self) -> None:
method load (line 746) | def load(cls, config_path: Path | None = None) -> ModelConfig:
method _validate (line 810) | def _validate(self) -> None:
method is_provider_enabled (line 865) | def is_provider_enabled(self, provider_name: str) -> bool:
method get_all_models (line 883) | def get_all_models(self) -> list[tuple[str, str]]:
method get_provider_for_model (line 899) | def get_provider_for_model(self, model_name: str) -> str | None:
method has_credentials (line 915) | def has_credentials(self, provider_name: str) -> bool | None:
method get_base_url (line 939) | def get_base_url(self, provider_name: str) -> str | None:
method get_api_key_env (line 951) | def get_api_key_env(self, provider_name: str) -> str | None:
method get_class_path (line 963) | def get_class_path(self, provider_name: str) -> str | None:
method get_kwargs (line 975) | def get_kwargs(
method get_profile_overrides (line 1002) | def get_profile_overrides(
function _save_model_field (line 1030) | def _save_model_field(
function save_default_model (line 1083) | def save_default_model(model_spec: str, config_path: Path | None = None)...
function clear_default_model (line 1104) | def clear_default_model(config_path: Path | None = None) -> bool:
function is_warning_suppressed (line 1152) | def is_warning_suppressed(key: str, config_path: Path | None = None) -> ...
function suppress_warning (line 1196) | def suppress_warning(key: str, config_path: Path | None = None) -> bool:
class ThreadConfig (line 1258) | class ThreadConfig(NamedTuple):
function load_thread_config (line 1274) | def load_thread_config(config_path: Path | None = None) -> ThreadConfig:
function invalidate_thread_config_cache (line 1337) | def invalidate_thread_config_cache() -> None:
function load_thread_columns (line 1343) | def load_thread_columns(config_path: Path | None = None) -> dict[str, bo...
function save_thread_columns (line 1371) | def save_thread_columns(
function load_thread_relative_time (line 1415) | def load_thread_relative_time(config_path: Path | None = None) -> bool:
function save_thread_relative_time (line 1439) | def save_thread_relative_time(enabled: bool, config_path: Path | None = ...
function load_thread_sort_order (line 1477) | def load_thread_sort_order(config_path: Path | None = None) -> str:
function save_thread_sort_order (line 1501) | def save_thread_sort_order(sort_order: str, config_path: Path | None = N...
function save_recent_model (line 1547) | def save_recent_model(model_spec: str, config_path: Path | None = None) ...
FILE: libs/cli/deepagents_cli/non_interactive.py
class HITLIterationLimitError (line 72) | class HITLIterationLimitError(RuntimeError):
function _write_text (line 92) | def _write_text(text: str) -> None:
function _write_newline (line 106) | def _write_newline() -> None:
class _ConsoleSpinner (line 112) | class _ConsoleSpinner:
method __init__ (line 119) | def __init__(self, console: Console) -> None:
method start (line 123) | def start(self, message: str = "Working...") -> None:
method stop (line 146) | def stop(self) -> None:
class StreamState (line 158) | class StreamState:
class ThreadUrlLookupState (line 207) | class ThreadUrlLookupState:
function _start_langsmith_thread_url_lookup (line 218) | def _start_langsmith_thread_url_lookup(thread_id: str) -> ThreadUrlLooku...
function _process_interrupts (line 245) | def _process_interrupts(
function _process_ai_message (line 285) | def _process_ai_message(
function _process_message_chunk (line 355) | def _process_message_chunk(
function _process_stream_chunk (line 402) | def _process_stream_chunk(
function _make_hitl_decision (line 445) | def _make_hitl_decision(
function _collect_action_request_warnings (line 515) | def _collect_action_request_warnings(action_request: ActionRequest) -> l...
function _process_hitl_interrupts (line 550) | def _process_hitl_interrupts(state: StreamState, console: Console) -> None:
function _stream_agent (line 571) | async def _stream_agent(
function _run_agent_loop (line 606) | async def _run_agent_loop(
function _build_non_interactive_header (line 698) | def _build_non_interactive_header(
function run_non_interactive (line 744) | async def run_non_interactive(
FILE: libs/cli/deepagents_cli/offload.py
class OffloadResult (line 36) | class OffloadResult:
class OffloadThresholdNotMet (line 62) | class OffloadThresholdNotMet:
class OffloadModelError (line 79) | class OffloadModelError(Exception):
function format_offload_limit (line 88) | def format_offload_limit(
function offload_messages_to_backend (line 122) | async def offload_messages_to_backend(
function perform_offload (line 210) | async def perform_offload(
FILE: libs/cli/deepagents_cli/output.py
function add_json_output_arg (line 18) | def add_json_output_arg(
function write_json (line 49) | def write_json(command: str, data: list | dict) -> None:
FILE: libs/cli/deepagents_cli/project_utils.py
class ProjectContext (line 21) | class ProjectContext:
method __post_init__ (line 32) | def __post_init__(self) -> None:
method from_user_cwd (line 46) | def from_user_cwd(cls, user_cwd: str | Path) -> ProjectContext:
method resolve_user_path (line 61) | def resolve_user_path(self, path: str | Path) -> Path:
method project_agent_md_paths (line 75) | def project_agent_md_paths(self) -> list[Path]:
method project_skills_dir (line 81) | def project_skills_dir(self) -> Path | None:
method project_agents_dir (line 87) | def project_agents_dir(self) -> Path | None:
method project_agent_skills_dir (line 93) | def project_agent_skills_dir(self) -> Path | None:
function get_server_project_context (line 100) | def get_server_project_context(
function find_project_root (line 135) | def find_project_root(start_path: str | Path | None = None) -> Path | None:
function find_project_agent_md (line 159) | def find_project_agent_md(project_root: Path) -> list[Path]:
FILE: libs/cli/deepagents_cli/remote_client.py
function _require_thread_id (line 23) | def _require_thread_id(config: dict[str, Any] | None) -> str:
class RemoteAgent (line 42) | class RemoteAgent:
method __init__ (line 51) | def __init__(
method _get_graph (line 78) | def _get_graph(self) -> Any: # noqa: ANN401
method astream (line 95) | async def astream(
method aget_state (line 179) | async def aget_state(
method aupdate_state (line 215) | async def aupdate_state(
method aensure_thread (line 243) | async def aensure_thread(self, config: dict[str, Any]) -> None:
method with_config (line 286) | def with_config(self, config: dict[str, Any]) -> RemoteAgent: # noqa:...
function _prepare_config (line 303) | def _prepare_config(config: dict[str, Any] | None) -> dict[str, Any]:
function _convert_interrupts (line 318) | def _convert_interrupts(raw: Any) -> list[Any]: # noqa: ANN401
function _convert_ai_message (line 357) | def _convert_ai_message(data: dict[str, Any]) -> Any: # noqa: ANN401
function _convert_human_message (line 427) | def _convert_human_message(data: dict[str, Any]) -> Any: # noqa: ANN401
function _convert_tool_message (line 452) | def _convert_tool_message(data: dict[str, Any]) -> Any: # noqa: ANN401
function _convert_message_data (line 497) | def _convert_message_data(data: dict[str, Any]) -> Any: # noqa: ANN401
FILE: libs/cli/deepagents_cli/server.py
function _port_in_use (line 34) | def _port_in_use(host: str, port: int) -> bool:
function _find_free_port (line 55) | def _find_free_port(host: str) -> int:
function get_server_url (line 71) | def get_server_url(host: str = _DEFAULT_HOST, port: int = _DEFAULT_PORT)...
function generate_langgraph_json (line 84) | def generate_langgraph_json(
function _scoped_env_overrides (line 126) | def _scoped_env_overrides(
function wait_for_server_healthy (line 164) | async def wait_for_server_healthy(
function _build_server_cmd (line 226) | def _build_server_cmd(config_path: Path, *, host: str, port: int) -> lis...
function _build_server_env (line 253) | def _build_server_env() -> dict[str, str]:
class ServerProcess (line 280) | class ServerProcess:
method __init__ (line 289) | def __init__(
method url (line 319) | def url(self) -> str:
method running (line 324) | def running(self) -> bool:
method _read_log_file (line 328) | def _read_log_file(self) -> str:
method start (line 349) | async def start(
method _stop_process (line 412) | def _stop_process(self) -> None:
method stop (line 449) | def stop(self) -> None:
method update_env (line 471) | def update_env(self, **overrides: str) -> None:
method restart (line 483) | async def restart(self, *, timeout: float = _HEALTH_TIMEOUT) -> None: ...
method __aenter__ (line 502) | async def __aenter__(self) -> Self:
method __aexit__ (line 511) | async def __aexit__(self, *args: object) -> None:
FILE: libs/cli/deepagents_cli/server_graph.py
function _build_tools (line 31) | def _build_tools(
function make_graph (line 93) | def make_graph() -> Any: # noqa: ANN401
FILE: libs/cli/deepagents_cli/server_manager.py
function _set_or_clear_server_env (line 40) | def _set_or_clear_server_env(name: str, value: str | None) -> None:
function _apply_server_config (line 54) | def _apply_server_config(config: ServerConfig) -> None:
function _capture_project_context (line 69) | def _capture_project_context() -> ProjectContext | None:
function _scaffold_workspace (line 87) | def _scaffold_workspace(work_dir: Path) -> None:
function _write_checkpointer (line 114) | def _write_checkpointer(work_dir: Path) -> None:
function _write_pyproject (line 159) | def _write_pyproject(work_dir: Path) -> None:
function start_server_and_get_agent (line 189) | async def start_server_and_get_agent(
function server_session (line 280) | async def server_session(
FILE: libs/cli/deepagents_cli/sessions.py
function _patch_aiosqlite (line 34) | def _patch_aiosqlite() -> None:
function _connect (line 65) | async def _connect() -> AsyncIterator[aiosqlite.Connection]:
class ThreadInfo (line 82) | class ThreadInfo(TypedDict):
class _CheckpointSummary (line 113) | class _CheckpointSummary(NamedTuple):
function format_timestamp (line 123) | def format_timestamp(iso_timestamp: str | None) -> str:
function format_relative_timestamp (line 151) | def format_relative_timestamp(iso_timestamp: str | None) -> str:
function format_path (line 194) | def format_path(path: str | None) -> str:
function get_db_path (line 224) | def get_db_path() -> Path:
function generate_thread_id (line 235) | def generate_thread_id() -> str:
function _table_exists (line 246) | async def _table_exists(conn: aiosqlite.Connection, table: str) -> bool:
function list_threads (line 257) | async def list_threads(
function populate_thread_message_counts (line 344) | async def populate_thread_message_counts(threads: list[ThreadInfo]) -> l...
function populate_thread_checkpoint_details (line 364) | async def populate_thread_checkpoint_details(
function prewarm_thread_message_counts (line 396) | async def prewarm_thread_message_counts(limit: int | None = None) -> None:
function get_cached_threads (line 431) | def get_cached_threads(
function apply_cached_thread_message_counts (line 473) | def apply_cached_thread_message_counts(threads: list[ThreadInfo]) -> int:
function apply_cached_thread_initial_prompts (line 496) | def apply_cached_thread_initial_prompts(threads: list[ThreadInfo]) -> int:
function _populate_message_counts (line 519) | async def _populate_message_counts(
function _get_jsonplus_serializer (line 532) | async def _get_jsonplus_serializer() -> JsonPlusSerializer:
function _create_jsonplus_serializer (line 543) | def _create_jsonplus_serializer() -> JsonPlusSerializer:
function _cache_message_count (line 554) | def _cache_message_count(thread_id: str, freshness: str | None, count: i...
function _cache_initial_prompt (line 564) | def _cache_initial_prompt(
function _thread_freshness (line 578) | def _thread_freshness(thread: ThreadInfo) -> str | None:
function _cache_recent_threads (line 583) | def _cache_recent_threads(
function _copy_threads (line 597) | def _copy_threads(threads: list[ThreadInfo]) -> list[ThreadInfo]:
function _count_messages_from_checkpoint (line 602) | async def _count_messages_from_checkpoint(
function _extract_initial_prompt (line 624) | async def _extract_initial_prompt(
function populate_thread_initial_prompts (line 643) | async def populate_thread_initial_prompts(threads: list[ThreadInfo]) -> ...
function _populate_checkpoint_fields (line 661) | async def _populate_checkpoint_fields(
function _load_latest_checkpoint_summaries_batch (line 729) | async def _load_latest_checkpoint_summaries_batch(
function _load_latest_checkpoint_summary (line 791) | async def _load_latest_checkpoint_summary(
function _summarize_checkpoint (line 828) | def _summarize_checkpoint(data: object) -> _CheckpointSummary:
function _checkpoint_messages (line 841) | def _checkpoint_messages(data: object) -> list[object]:
function _initial_prompt_from_messages (line 859) | def _initial_prompt_from_messages(messages: list[object]) -> str | None:
function _coerce_prompt_text (line 867) | def _coerce_prompt_text(content: object) -> str | None:
function get_most_recent (line 891) | async def get_most_recent(agent_name: str | None = None) -> str | None:
function get_thread_agent (line 920) | async def get_thread_agent(thread_id: str) -> str | None:
function thread_exists (line 941) | async def thread_exists(thread_id: str) -> bool:
function find_similar_threads (line 957) | async def find_similar_threads(thread_id: str, limit: int = 3) -> list[s...
function delete_thread (line 984) | async def delete_thread(thread_id: str) -> bool:
function get_checkpointer (line 1010) | async def get_checkpointer() -> AsyncIterator[AsyncSqliteSaver]:
function get_thread_limit (line 1027) | def get_thread_limit() -> int:
function list_threads_command (line 1052) | async def list_threads_command(
function delete_thread_command (line 1197) | async def delete_thread_command(
FILE: libs/cli/deepagents_cli/skills/commands.py
function _validate_name (line 28) | def _validate_name(name: str) -> tuple[bool, str]:
function _validate_skill_path (line 85) | def _validate_skill_path(skill_dir: Path, base_dir: Path) -> tuple[bool,...
function _format_info_fields (line 109) | def _format_info_fields(skill: SkillMetadata) -> list[tuple[str, str]]:
function _list (line 142) | def _list(
function _generate_template (line 328) | def _generate_template(skill_name: str) -> str:
function _create (line 400) | def _create(
function _info (line 509) | def _info(
function _delete (line 643) | def _delete(
function setup_skills_parser (line 825) | def setup_skills_parser(
function execute_skills_command (line 982) | def execute_skills_command(args: argparse.Namespace) -> None:
FILE: libs/cli/deepagents_cli/skills/load.py
class ExtendedSkillMetadata (line 31) | class ExtendedSkillMetadata(SkillMetadata):
function list_skills (line 45) | def list_skills(
FILE: libs/cli/deepagents_cli/subagents.py
class SubagentMetadata (line 34) | class SubagentMetadata(TypedDict):
function _parse_subagent_file (line 56) | def _parse_subagent_file(file_path: Path) -> SubagentMetadata | None:
function _load_subagents_from_dir (line 110) | def _load_subagents_from_dir(
function list_subagents (line 146) | def list_subagents(
FILE: libs/cli/deepagents_cli/textual_adapter.py
function _get_hitl_request_adapter (line 64) | def _get_hitl_request_adapter(hitl_request_type: type) -> TypeAdapter:
function _format_duration (line 82) | def _format_duration(seconds: float) -> str:
function print_usage_table (line 101) | def print_usage_table(
function _get_git_branch (line 180) | def _get_git_branch() -> str | None:
function _build_stream_config (line 210) | def _build_stream_config(
function _is_summarization_chunk (line 257) | def _is_summarization_chunk(metadata: dict | None) -> bool:
class TextualUIAdapter (line 276) | class TextualUIAdapter:
method __init__ (line 326) | def __init__(
method set_token_tracker (line 373) | def set_token_tracker(self, tracker: Any) -> None: # noqa: ANN401 # ...
method finalize_pending_tools_with_error (line 377) | def finalize_pending_tools_with_error(self, error: str) -> None:
function _build_interrupted_ai_message (line 395) | def _build_interrupted_ai_message(
function _read_mentioned_file (line 433) | def _read_mentioned_file(file_path: Path, max_embed_bytes: int) -> str:
function execute_task_textual (line 456) | async def execute_task_textual(
function _flush_assistant_text_ns (line 1335) | async def _flush_assistant_text_ns(
FILE: libs/cli/deepagents_cli/tool_display.py
function _format_timeout (line 22) | def _format_timeout(seconds: int) -> str:
function _coerce_timeout_seconds (line 41) | def _coerce_timeout_seconds(timeout: int | str | None) -> int | None:
function truncate_value (line 66) | def truncate_value(value: str, max_length: int = MAX_ARG_LENGTH) -> str:
function _sanitize_display_value (line 77) | def _sanitize_display_value(value: object, *, max_length: int = MAX_ARG_...
function format_tool_display (line 98) | def format_tool_display(tool_name: str, tool_args: dict) -> str:
function _format_content_block (line 251) | def _format_content_block(block: dict) -> str:
function format_tool_message_content (line 286) | def format_tool_message_content(content: Any) -> str: # noqa: ANN401 #...
FILE: libs/cli/deepagents_cli/tools.py
function _get_tavily_client (line 14) | def _get_tavily_client() -> TavilyClient | None:
function http_request (line 35) | def http_request(
function web_search (line 104) | def web_search( # noqa: ANN201 # Return type depends on dynamic tool c...
function fetch_url (line 183) | def fetch_url(url: str, timeout: int = 30) -> dict[str, Any]:
FILE: libs/cli/deepagents_cli/ui.py
function _print_option_section (line 21) | def _print_option_section(*lines: str, title: str = "Options") -> None:
function show_help (line 35) | def show_help() -> None:
function show_list_help (line 145) | def show_list_help() -> None:
function show_reset_help (line 165) | def show_reset_help() -> None:
function show_skills_help (line 192) | def show_skills_help() -> None:
function show_skills_list_help (line 238) | def show_skills_list_help() -> None:
function show_skills_create_help (line 251) | def show_skills_create_help() -> None:
function show_skills_info_help (line 269) | def show_skills_info_help() -> None:
function show_skills_delete_help (line 282) | def show_skills_delete_help() -> None:
function show_threads_help (line 301) | def show_threads_help() -> None:
function show_threads_delete_help (line 325) | def show_threads_delete_help() -> None:
function show_threads_list_help (line 338) | def show_threads_list_help() -> None:
FILE: libs/cli/deepagents_cli/unicode_security.py
class UnicodeIssue (line 85) | class UnicodeIssue:
method __post_init__ (line 100) | def __post_init__(self) -> None: # noqa: D105
class UrlSafetyResult (line 117) | class UrlSafetyResult:
function detect_dangerous_unicode (line 140) | def detect_dangerous_unicode(text: str) -> list[UnicodeIssue]:
function strip_dangerous_unicode (line 164) | def strip_dangerous_unicode(text: str) -> str:
function render_with_unicode_markers (line 176) | def render_with_unicode_markers(text: str) -> str:
function summarize_issues (line 198) | def summarize_issues(issues: list[UnicodeIssue], *, max_items: int = 3) ...
function format_warning_detail (line 230) | def format_warning_detail(warnings: tuple[str, ...], *, max_shown: int =...
function check_url_safety (line 248) | def check_url_safety(url: str) -> UrlSafetyResult:
function _decode_hostname (line 315) | def _decode_hostname(hostname: str) -> tuple[str, list[str]]:
function _split_hostname_labels (line 335) | def _split_hostname_labels(hostname: str) -> list[str]:
function _is_local_or_ip_hostname (line 344) | def _is_local_or_ip_hostname(hostname: str) -> bool:
function _scripts_in_label (line 364) | def _scripts_in_label(label: str) -> set[str]:
function _label_has_suspicious_confusable_mix (line 379) | def _label_has_suspicious_confusable_mix(label: str) -> bool:
function _char_script (line 396) | def _char_script(character: str) -> str:
function _format_codepoint (line 437) | def _format_codepoint(character: str) -> str:
function _unicode_name (line 446) | def _unicode_name(character: str) -> str:
function iter_string_values (line 460) | def iter_string_values(
function _iter_string_values_from_list (line 484) | def _iter_string_values_from_list(
function looks_like_url_key (line 508) | def looks_like_url_key(arg_path: str) -> bool:
FILE: libs/cli/deepagents_cli/update_check.py
function _parse_version (line 51) | def _parse_version(v: str) -> tuple[int, ...]:
function get_latest_version (line 63) | def get_latest_version(*, bypass_cache: bool = False) -> str | None:
function is_update_available (line 115) | def is_update_available(*, bypass_cache: bool = False) -> tuple[bool, st...
function detect_install_method (line 146) | def detect_install_method() -> InstallMethod:
function upgrade_command (line 173) | def upgrade_command(method: InstallMethod | None = None) -> str:
function perform_upgrade (line 188) | async def perform_upgrade() -> tuple[bool, str]:
function is_update_check_enabled (line 245) | def is_update_check_enabled() -> bool:
function is_auto_update_enabled (line 258) | def is_auto_update_enabled() -> bool:
function _read_update_config (line 277) | def _read_update_config() -> dict[str, bool]:
function get_seen_version (line 300) | def get_seen_version() -> str | None:
function mark_version_seen (line 311) | def mark_version_seen(version: str) -> None:
function should_show_whats_new (line 323) | def should_show_whats_new() -> bool:
FILE: libs/cli/deepagents_cli/widgets/_links.py
function open_style_link (line 17) | def open_style_link(event: Click) -> None:
FILE: libs/cli/deepagents_cli/widgets/approval.py
class ApprovalMenu (line 42) | class ApprovalMenu(Container):
class Decided (line 74) | class Decided(Message):
method __init__ (line 77) | def __init__(self, decision: dict[str, str]) -> None:
method __init__ (line 90) | def __init__(
method set_future (line 129) | def set_future(self, future: asyncio.Future[dict[str, str]]) -> None:
method _check_expandable_command (line 133) | def _check_expandable_command(self) -> bool:
method _get_command_display (line 147) | def _get_command_display(self, *, expanded: bool) -> Content:
method compose (line 203) | def compose(self) -> ComposeResult:
method on_mount (line 276) | async def on_mount(self) -> None:
method _update_tool_info (line 286) | async def _update_tool_info(self) -> None:
method _update_options (line 325) | def _update_options(self) -> None:
method action_move_up (line 352) | def action_move_up(self) -> None:
method action_move_down (line 357) | def action_move_down(self) -> None:
method action_select (line 362) | def action_select(self) -> None:
method action_select_approve (line 366) | def action_select_approve(self) -> None:
method action_select_auto (line 372) | def action_select_auto(self) -> None:
method action_select_reject (line 378) | def action_select_reject(self) -> None:
method action_toggle_expand (line 384) | def action_toggle_expand(self) -> None:
method _handle_selection (line 393) | def _handle_selection(self, option: int) -> None:
method _collect_security_warnings (line 409) | def _collect_security_warnings(self) -> list[str]:
method on_blur (line 440) | def on_blur(self, event: events.Blur) -> None: # noqa: ARG002 # Text...
FILE: libs/cli/deepagents_cli/widgets/ask_user.py
class AskUserMenu (line 35) | class AskUserMenu(Container):
class Answered (line 50) | class Answered(Message):
method __init__ (line 53) | def __init__(self, answers: list[str]) -> None: # noqa: D107
class Cancelled (line 57) | class Cancelled(Message):
method __init__ (line 60) | def __init__(self) -> None: # noqa: D107
method __init__ (line 63) | def __init__( # noqa: D107
method set_future (line 78) | def set_future(self, future: asyncio.Future[AskUserWidgetResult]) -> N...
method compose (line 82) | def compose(self) -> ComposeResult: # noqa: D102
method on_mount (line 111) | async def on_mount(self) -> None: # noqa: D102
method focus_active (line 116) | def focus_active(self) -> None:
method on_input_submitted (line 120) | def on_input_submitted(self, event: Input.Submitted) -> None: # noqa:...
method confirm_and_advance (line 132) | def confirm_and_advance(self, index: int) -> None:
method _set_active_question (line 161) | def _set_active_question(self, index: int) -> None:
method _submit (line 173) | def _submit(self) -> None:
method action_next_question (line 181) | def action_next_question(self) -> None:
method action_previous_question (line 186) | def action_previous_question(self) -> None:
method action_cancel (line 191) | def action_cancel(self) -> None: # noqa: D102
method on_blur (line 199) | def on_blur(self, event: events.Blur) -> None: # noqa: PLR6301 # Tex...
class _ChoiceOption (line 204) | class _ChoiceOption(Static):
method __init__ (line 207) | def __init__(
method toggle (line 215) | def toggle(self) -> None:
method select (line 220) | def select(self) -> None:
method deselect (line 225) | def deselect(self) -> None:
method _render (line 230) | def _render(self) -> Content:
class _QuestionWidget (line 241) | class _QuestionWidget(Vertical):
method __init__ (line 255) | def __init__(self, question: Question, index: int, **kwargs: Any) -> N...
method compose (line 271) | def compose(self) -> ComposeResult:
method focus_input (line 303) | def focus_input(self) -> None:
method get_answer (line 312) | def get_answer(self) -> str:
method action_move_up (line 325) | def action_move_up(self) -> None:
method action_move_down (line 345) | def action_move_down(self) -> None:
method action_select_or_submit (line 355) | def action_select_or_submit(self) -> None:
method _find_menu (line 372) | def _find_menu(self) -> AskUserMenu | None:
method _update_choice_selection (line 384) | def _update_choice_selection(self) -> None:
FILE: libs/cli/deepagents_cli/widgets/autocomplete.py
function _get_git_executable (line 23) | def _get_git_executable() -> str | None:
class CompletionResult (line 36) | class CompletionResult(StrEnum):
class CompletionView (line 44) | class CompletionView(Protocol):
method render_completion_suggestions (line 47) | def render_completion_suggestions(
method clear_completion_suggestions (line 58) | def clear_completion_suggestions(self) -> None:
method replace_completion_range (line 62) | def replace_completion_range(self, start: int, end: int, replacement: ...
class CompletionController (line 73) | class CompletionController(Protocol):
method can_handle (line 76) | def can_handle(self, text: str, cursor_index: int) -> bool:
method on_text_changed (line 80) | def on_text_changed(self, text: str, cursor_index: int) -> None:
method on_key (line 84) | def on_key(
method reset (line 90) | def reset(self) -> None:
class SlashCommandController (line 110) | class SlashCommandController:
method __init__ (line 113) | def __init__(
method can_handle (line 130) | def can_handle(text: str, cursor_index: int) -> bool: # noqa: ARG004 ...
method reset (line 138) | def reset(self) -> None:
method _score_command (line 146) | def _score_command(search: str, cmd: str, desc: str, keywords: str = "...
method on_text_changed (line 186) | def on_text_changed(self, text: str, cursor_index: int) -> None:
method on_key (line 223) | def on_key(
method _move_selection (line 255) | def _move_selection(self, delta: int) -> None:
method _apply_selected_completion (line 265) | def _apply_selected_completion(self, cursor_index: int) -> bool:
function _get_project_files (line 296) | def _get_project_files(root: Path) -> list[str]:
function _fuzzy_score (line 336) | def _fuzzy_score(query: str, candidate: str) -> float:
function _is_dotpath (line 383) | def _is_dotpath(path: str) -> bool:
function _path_depth (line 392) | def _path_depth(path: str) -> int:
function _fuzzy_search (line 401) | def _fuzzy_search(
class FuzzyFileController (line 440) | class FuzzyFileController:
method __init__ (line 443) | def __init__(
method _get_files (line 461) | def _get_files(self) -> list[str]:
method refresh_cache (line 471) | def refresh_cache(self) -> None:
method warm_cache (line 475) | async def warm_cache(self) -> None:
method can_handle (line 486) | def can_handle(text: str, cursor_index: int) -> bool:
method reset (line 507) | def reset(self) -> None:
method on_text_changed (line 514) | def on_text_changed(self, text: str, cursor_index: int) -> None:
method _get_fuzzy_suggestions (line 535) | def _get_fuzzy_suggestions(self, search: str) -> list[tuple[str, str]]:
method on_key (line 557) | def on_key(
method _move_selection (line 585) | def _move_selection(self, delta: int) -> None:
method _apply_selected_completion (line 595) | def _apply_selected_completion(self, text: str, cursor_index: int) -> ...
class MultiCompletionManager (line 626) | class MultiCompletionManager:
method __init__ (line 629) | def __init__(self, controllers: list[CompletionController]) -> None:
method on_text_changed (line 638) | def on_text_changed(self, text: str, cursor_index: int) -> None:
method on_key (line 663) | def on_key(
method reset (line 675) | def reset(self) -> None:
FILE: libs/cli/deepagents_cli/widgets/chat_input.py
function _default_history_path (line 41) | def _default_history_path() -> Path:
class CompletionOption (line 78) | class CompletionOption(Static):
class Clicked (line 101) | class Clicked(Message):
method __init__ (line 104) | def __init__(self, index: int) -> None:
method __init__ (line 109) | def __init__(
method on_mount (line 132) | def on_mount(self) -> None:
method _update_display (line 136) | def _update_display(self) -> None:
method set_selected (line 159) | def set_selected(self, *, selected: bool) -> None:
method set_content (line 165) | def set_content(
method on_click (line 175) | def on_click(self, event: Click) -> None:
class CompletionPopup (line 181) | class CompletionPopup(VerticalScroll):
class OptionClicked (line 192) | class OptionClicked(Message):
method __init__ (line 195) | def __init__(self, index: int) -> None:
method __init__ (line 200) | def __init__(self, **kwargs: Any) -> None:
method update_suggestions (line 210) | def update_suggestions(
method _rebuild_options (line 227) | async def _rebuild_options(self, generation: int) -> None:
method update_selection (line 289) | def update_selection(self, selected_index: int) -> None:
method on_completion_option_clicked (line 308) | def on_completion_option_clicked(self, event: CompletionOption.Clicked...
method hide (line 313) | def hide(self) -> None:
method show (line 319) | def show(self) -> None:
class ChatTextArea (line 324) | class ChatTextArea(TextArea):
class Submitted (line 367) | class Submitted(Message):
method __init__ (line 370) | def __init__(self, value: str) -> None:
class HistoryPrevious (line 375) | class HistoryPrevious(Message):
method __init__ (line 378) | def __init__(self, current_text: str) -> None:
class HistoryNext (line 383) | class HistoryNext(Message):
class PastedPaths (line 386) | class PastedPaths(Message):
method __init__ (line 389) | def __init__(self, raw_text: str, paths: list[Path]) -> None:
class Typing (line 395) | class Typing(Message):
method __init__ (line 402) | def __init__(self, **kwargs: Any) -> None:
method set_app_focus (line 420) | def set_app_focus(self, *, has_focus: bool) -> None:
method set_completion_active (line 432) | def set_completion_active(self, *, active: bool) -> None:
method action_insert_newline (line 436) | def action_insert_newline(self) -> None:
method _cancel_paste_burst_timer (line 440) | def _cancel_paste_burst_timer(self) -> None:
method _schedule_paste_burst_flush (line 447) | def _schedule_paste_burst_flush(self) -> None:
method _start_paste_burst (line 454) | def _start_paste_burst(self, char: str, now: float) -> None:
method _append_paste_burst (line 460) | def _append_paste_burst(self, text: str, now: float) -> None:
method _should_start_paste_burst (line 469) | def _should_start_paste_burst(self, char: str) -> bool:
method _flush_paste_burst (line 482) | async def _flush_paste_burst(self) -> None:
method _delete_preceding_backslash (line 507) | def _delete_preceding_backslash(self) -> bool:
method _on_key (line 531) | async def _on_key(self, event: events.Key) -> None:
method _delete_image_placeholder (line 668) | def _delete_image_placeholder(self, *, backwards: bool) -> bool:
method _find_image_placeholder_span (line 693) | def _find_image_placeholder_span(
method _on_paste (line 725) | async def _on_paste(self, event: events.Paste) -> None:
method set_text_from_history (line 747) | def set_text_from_history(self, text: str) -> None:
method clear_text (line 761) | def clear_text(self) -> None:
class _CompletionViewAdapter (line 776) | class _CompletionViewAdapter:
method __init__ (line 779) | def __init__(self, chat_input: ChatInput) -> None:
method render_completion_suggestions (line 783) | def render_completion_suggestions(
method clear_completion_suggestions (line 789) | def clear_completion_suggestions(self) -> None:
method replace_completion_range (line 793) | def replace_completion_range(self, start: int, end: int, replacement: ...
class ChatInput (line 802) | class ChatInput(Vertical):
class Submitted (line 868) | class Submitted(Message):
method __init__ (line 871) | def __init__(self, value: str, mode: str = "normal") -> None:
class ModeChanged (line 877) | class ModeChanged(Message):
method __init__ (line 880) | def __init__(self, mode: str) -> None:
class Typing (line 885) | class Typing(Message):
method __init__ (line 895) | def __init__(
method compose (line 949) | def compose(self) -> ComposeResult: # noqa: PLR6301 # Textual widget...
method on_mount (line 961) | def on_mount(self) -> None:
method on_text_area_changed (line 989) | def on_text_area_changed(self, event: TextArea.Changed) -> None:
method _parse_dropped_path_payload (line 1052) | def _parse_dropped_path_payload(
method _parse_dropped_path_payload_with_command_recovery (line 1064) | def _parse_dropped_path_payload_with_command_recovery(
method _extract_leading_dropped_path_with_command_recovery (line 1100) | def _extract_leading_dropped_path_with_command_recovery(
method _is_existing_path_payload (line 1135) | def _is_existing_path_payload(text: str) -> bool:
method _is_dropped_path_payload (line 1143) | def _is_dropped_path_payload(self, text: str) -> bool:
method _strip_mode_prefix (line 1154) | def _strip_mode_prefix(self) -> None:
method _completion_text_and_cursor (line 1177) | def _completion_text_and_cursor(self) -> tuple[str, int]:
method _completion_index_to_text_index (line 1196) | def _completion_index_to_text_index(self, index: int) -> int:
method _submit_value (line 1222) | def _submit_value(self, value: str) -> None:
method _sync_media_tracker_to_text (line 1256) | def _sync_media_tracker_to_text(self, text: str) -> None:
method on_chat_text_area_typing (line 1276) | def on_chat_text_area_typing(
method on_chat_text_area_submitted (line 1283) | def on_chat_text_area_submitted(self, event: ChatTextArea.Submitted) -...
method on_chat_text_area_history_previous (line 1291) | def on_chat_text_area_history_previous(
method on_chat_text_area_history_next (line 1306) | def on_chat_text_area_history_next(
method on_chat_text_area_pasted_paths (line 1324) | def on_chat_text_area_pasted_paths(self, event: ChatTextArea.PastedPat...
method handle_external_paste (line 1331) | def handle_external_paste(self, pasted: str) -> bool:
method _apply_inline_dropped_path_replacement (line 1356) | def _apply_inline_dropped_path_replacement(self, text: str) -> bool:
method _insert_pasted_paths (line 1389) | def _insert_pasted_paths(self, raw_text: str, paths: list[Path]) -> None:
method _build_path_replacement (line 1406) | def _build_path_replacement(
method _replace_submitted_paths_with_images (line 1478) | def _replace_submitted_paths_with_images(self, value: str) -> str:
method _history_entry_mode_and_text (line 1527) | def _history_entry_mode_and_text(entry: str) -> tuple[str, str]:
method on_key (line 1543) | async def on_key(self, event: events.Key) -> None:
method _get_cursor_offset (line 1582) | def _get_cursor_offset(self) -> int:
method watch_mode (line 1604) | def watch_mode(self, mode: str) -> None:
method focus_input (line 1626) | def focus_input(self) -> None:
method value (line 1632) | def value(self) -> str:
method value (line 1643) | def value(self, val: str) -> None:
method input_widget (line 1649) | def input_widget(self) -> ChatTextArea | None:
method set_disabled (line 1657) | def set_disabled(self, *, disabled: bool) -> None:
method set_cursor_active (line 1666) | def set_cursor_active(self, *, active: bool) -> None:
method exit_mode (line 1675) | def exit_mode(self) -> bool:
method dismiss_completion (line 1689) | def dismiss_completion(self) -> bool:
method render_completion_suggestions (line 1708) | def render_completion_suggestions(
method clear_completion_suggestions (line 1726) | def clear_completion_suggestions(self) -> None:
method on_completion_popup_option_clicked (line 1737) | def on_completion_popup_option_clicked(
method replace_completion_range (line 1778) | def replace_completion_range(self, start: int, end: int, replacement: ...
FILE: libs/cli/deepagents_cli/widgets/diff.py
function format_diff_textual (line 18) | def format_diff_textual(diff: str, max_lines: int | None = 100) -> Content:
class EnhancedDiff (line 131) | class EnhancedDiff(Vertical):
method __init__ (line 158) | def __init__(
method _compute_stats (line 179) | def _compute_stats(self) -> tuple[int, int]:
method on_mount (line 194) | def on_mount(self) -> None:
method compose (line 199) | def compose(self) -> ComposeResult:
FILE: libs/cli/deepagents_cli/widgets/history.py
class HistoryManager (line 15) | class HistoryManager:
method __init__ (line 22) | def __init__(self, history_file: Path, max_entries: int = 100) -> None:
method _load_history (line 37) | def _load_history(self) -> None:
method _append_to_file (line 63) | def _append_to_file(self, text: str) -> None:
method _compact_history (line 76) | def _compact_history(self) -> None:
method add (line 93) | def add(self, text: str) -> None:
method get_previous (line 120) | def get_previous(self, current_input: str, *, query: str = "") -> str ...
method get_next (line 154) | def get_next(self) -> str | None:
method in_history (line 180) | def in_history(self) -> bool:
method reset_navigation (line 184) | def reset_navigation(self) -> None:
FILE: libs/cli/deepagents_cli/widgets/loading.py
class Spinner (line 18) | class Spinner:
method __init__ (line 21) | def __init__(self) -> None:
method frames (line 26) | def frames(self) -> tuple[str, ...]:
method next_frame (line 30) | def next_frame(self) -> str:
method current_frame (line 41) | def current_frame(self) -> str:
class LoadingWidget (line 50) | class LoadingWidget(Static):
method __init__ (line 85) | def __init__(self, status: str = "Thinking") -> None:
method compose (line 101) | def compose(self) -> ComposeResult:
method on_mount (line 121) | def on_mount(self) -> None:
method _update_animation (line 126) | def _update_animation(self) -> None:
method set_status (line 139) | def set_status(self, status: str) -> None:
method pause (line 149) | def pause(self, status: str = "Awaiting decision") -> None:
method resume (line 166) | def resume(self) -> None:
method stop (line 173) | def stop(self) -> None:
FILE: libs/cli/deepagents_cli/widgets/mcp_viewer.py
class MCPToolItem (line 24) | class MCPToolItem(Static):
method __init__ (line 27) | def __init__(
method _format_collapsed (line 55) | def _format_collapsed(self, name: str, description: str) -> Content:
method _format_expanded (line 83) | def _format_expanded(name: str, description: str) -> Content:
method toggle_expand (line 101) | def toggle_expand(self) -> None:
method on_mount (line 112) | def on_mount(self) -> None:
method on_resize (line 117) | def on_resize(self) -> None:
method on_click (line 122) | def on_click(self, event: Click) -> None:
class MCPViewerScreen (line 135) | class MCPViewerScreen(ModalScreen[None]):
method __init__ (line 225) | def __init__(self, server_info: list[MCPServerInfo]) -> None:
method compose (line 236) | def compose(self) -> ComposeResult:
method on_mount (line 301) | async def on_mount(self) -> None:
method _move_to (line 307) | def _move_to(self, index: int) -> None:
method _move_selection (line 323) | def _move_selection(self, delta: int) -> None:
method action_move_up (line 335) | def action_move_up(self) -> None:
method action_move_down (line 339) | def action_move_down(self) -> None:
method action_toggle_expand (line 343) | def action_toggle_expand(self) -> None:
method action_page_up (line 348) | def action_page_up(self) -> None:
method action_page_down (line 353) | def action_page_down(self) -> None:
method action_cancel (line 358) | def action_cancel(self) -> None:
FILE: libs/cli/deepagents_cli/widgets/message_store.py
class MessageType (line 40) | class MessageType(StrEnum):
class ToolStatus (line 52) | class ToolStatus(StrEnum):
class MessageData (line 64) | class MessageData:
method __post_init__ (line 129) | def __post_init__(self) -> None:
method to_widget (line 139) | def to_widget(self) -> Widget:
method from_widget (line 201) | def from_widget(cls, widget: Widget) -> MessageData:
class MessageStore (line 305) | class MessageStore:
method __init__ (line 323) | def __init__(self) -> None:
method total_count (line 333) | def total_count(self) -> int:
method visible_count (line 338) | def visible_count(self) -> int:
method has_messages_above (line 343) | def has_messages_above(self) -> bool:
method has_messages_below (line 348) | def has_messages_below(self) -> bool:
method append (line 352) | def append(self, message: MessageData) -> None:
method bulk_load (line 361) | def bulk_load(
method get_message (line 390) | def get_message(self, message_id: str) -> MessageData | None:
method get_message_at_index (line 404) | def get_message_at_index(self, index: int) -> MessageData | None:
method update_message (line 417) | def update_message(self, message_id: str, **updates: Any) -> bool:
method set_active_message (line 446) | def set_active_message(self, message_id: str | None) -> None:
method is_active (line 456) | def is_active(self, message_id: str) -> bool:
method window_exceeded (line 467) | def window_exceeded(self) -> bool:
method get_messages_to_prune (line 475) | def get_messages_to_prune(self, count: int | None = None) -> list[Mess...
method mark_pruned (line 508) | def mark_pruned(self, message_ids: list[str]) -> None:
method get_messages_to_hydrate (line 524) | def get_messages_to_hydrate(self, count: int | None = None) -> list[Me...
method mark_hydrated (line 542) | def mark_hydrated(self, count: int) -> None:
method should_hydrate_above (line 550) | def should_hydrate_above(
method should_prune_below (line 569) | def should_prune_below(
method clear (line 594) | def clear(self) -> None:
method get_visible_range (line 601) | def get_visible_range(self) -> tuple[int, int]:
method get_all_messages (line 609) | def get_all_messages(self) -> list[MessageData]:
method get_visible_messages (line 617) | def get_visible_messages(self) -> list[MessageData]:
FILE: libs/cli/deepagents_cli/widgets/messages.py
function _show_timestamp_toast (line 40) | def _show_timestamp_toast(widget: Static | Vertical) -> None:
class _TimestampClickMixin (line 66) | class _TimestampClickMixin:
method on_click (line 75) | def on_click(self, event: Click) -> None: # noqa: ARG002 # Textual e...
function _mode_color (line 80) | def _mode_color(mode: str | None) -> str:
class FormattedOutput (line 101) | class FormattedOutput:
class UserMessage (line 142) | class UserMessage(_TimestampClickMixin, Static):
method __init__ (line 155) | def __init__(self, content: str, **kwargs: Any) -> None:
method on_mount (line 165) | def on_mount(self) -> None:
method compose (line 172) | def compose(self) -> ComposeResult:
class QueuedUserMessage (line 224) | class QueuedUserMessage(Static):
method __init__ (line 241) | def __init__(self, content: str, **kwargs: Any) -> None:
method on_mount (line 251) | def on_mount(self) -> None:
method compose (line 256) | def compose(self) -> ComposeResult:
class AssistantMessage (line 273) | class AssistantMessage(_TimestampClickMixin, Vertical):
method __init__ (line 293) | def __init__(self, content: str = "", **kwargs: Any) -> None:
method compose (line 305) | def compose(self) -> ComposeResult: # noqa: PLR6301 # Textual widget...
method on_mount (line 315) | def on_mount(self) -> None:
method _get_markdown (line 321) | def _get_markdown(self) -> Markdown:
method _ensure_stream (line 333) | def _ensure_stream(self) -> MarkdownStream:
method append_content (line 345) | async def append_content(self, text: str) -> None:
method write_initial_content (line 360) | async def write_initial_content(self) -> None:
method stop_stream (line 366) | async def stop_stream(self) -> None:
method set_content (line 372) | async def set_content(self, content: str) -> None:
class ToolCallMessage (line 386) | class ToolCallMessage(Vertical):
method __init__ (line 458) | def __init__(
method compose (line 491) | def compose(self) -> ComposeResult:
method on_mount (line 524) | def on_mount(self) -> None:
method _restore_deferred_state (line 542) | def _restore_deferred_state(self) -> None:
method set_running (line 603) | def set_running(self) -> None:
method _update_running_animation (line 619) | def _update_running_animation(self) -> None:
method _stop_animation (line 636) | def _stop_animation(self) -> None:
method set_success (line 642) | def set_success(self, result: str = "") -> None:
method set_error (line 657) | def set_error(self, error: str) -> None:
method set_rejected (line 685) | def set_rejected(self) -> None:
method set_skipped (line 697) | def set_skipped(self) -> None:
method toggle_output (line 707) | def toggle_output(self) -> None:
method on_click (line 714) | def on_click(self, event: Click) -> None:
method _format_output (line 722) | def _format_output(
method _prefix_output (line 775) | def _prefix_output(self, content: Content) -> Content: # noqa: PLR630...
method _format_todos_output (line 793) | def _format_todos_output(
method _parse_todo_items (line 825) | def _parse_todo_items(self, output: str) -> list | None: # noqa: PLR6...
method _build_todo_stats (line 843) | def _build_todo_stats(self, items: list) -> Content: # noqa: PLR6301 ...
method _format_single_todo (line 866) | def _format_single_todo(self, item: dict | str) -> Content: # noqa: P...
method _format_ls_output (line 898) | def _format_ls_output( # noqa: PLR6301 # Grouped as method for widge...
method _format_file_output (line 937) | def _format_file_output( # noqa: PLR6301 # Grouped as method for wid...
method _format_search_output (line 957) | def _format_search_output( # noqa: PLR6301 # Grouped as method for w...
method _format_shell_output (line 1007) | def _format_shell_output( # noqa: PLR6301 # Grouped as method for wi...
method _format_web_output (line 1033) | def _format_web_output(
method _try_parse_web_data (line 1049) | def _try_parse_web_data(output: str) -> dict | None:
method _format_web_dict (line 1062) | def _format_web_dict(self, data: dict, *, is_preview: bool) -> Formatt...
method _format_web_search_results (line 1104) | def _format_web_search_results( # noqa: PLR6301 # Grouped as method ...
method _format_lines_output (line 1130) | def _format_lines_output( # noqa: PLR6301 # Grouped as method for wi...
method _format_task_output (line 1146) | def _format_task_output( # noqa: PLR6301 # Grouped as method for wid...
method _update_output_display (line 1166) | def _update_output_display(self) -> None:
method has_output (line 1231) | def has_output(self) -> bool:
method _filtered_args (line 1239) | def _filtered_args(self) -> dict[str, Any]:
class DiffMessage (line 1255) | class DiffMessage(_TimestampClickMixin, Static):
method __init__ (line 1292) | def __init__(self, diff_content: str, file_path: str = "", **kwargs: A...
method compose (line 1304) | def compose(self) -> ComposeResult:
method on_mount (line 1320) | def on_mount(self) -> None:
class ErrorMessage (line 1326) | class ErrorMessage(_TimestampClickMixin, Static):
method __init__ (line 1340) | def __init__(self, error: str, **kwargs: Any) -> None:
method on_mount (line 1354) | def on_mount(self) -> None:
class AppMessage (line 1360) | class AppMessage(Static):
method __init__ (line 1379) | def __init__(self, message: str | Content, **kwargs: Any) -> None:
method on_click (line 1395) | def on_click(self, event: Click) -> None:
class SummarizationMessage (line 1401) | class SummarizationMessage(AppMessage):
method __init__ (line 1416) | def __init__(self, message: str | Content | None = None, **kwargs: Any...
FILE: libs/cli/deepagents_cli/widgets/model_selector.py
class ModelOption (line 36) | class ModelOption(Static):
method __init__ (line 39) | def __init__(
class Clicked (line 67) | class Clicked(Message):
method __init__ (line 70) | def __init__(self, model_spec: str, provider: str, index: int) -> None:
method on_click (line 83) | def on_click(self, event: Click) -> None:
class ModelSelectorScreen (line 93) | class ModelSelectorScreen(ModalScreen[tuple[str, str] | None]):
method __init__ (line 202) | def __init__(
method _find_current_model_index (line 242) | def _find_current_model_index(self) -> int:
method compose (line 257) | def compose(self) -> ComposeResult:
method on_mount (line 299) | async def on_mount(self) -> None:
method on_input_changed (line 312) | def on_input_changed(self, event: Input.Changed) -> None:
method on_input_submitted (line 322) | def on_input_submitted(self, event: Input.Submitted) -> None:
method on_model_option_clicked (line 331) | def on_model_option_clicked(self, event: ModelOption.Clicked) -> None:
method _update_filtered_list (line 340) | def _update_filtered_list(self) -> None:
method _update_display (line 376) | async def _update_display(self) -> None:
method _format_option_label (line 504) | def _format_option_label(
method _format_footer (line 549) | def _format_footer(
method _get_model_status (line 659) | def _get_model_status(self, model_spec: str) -> str | None:
method _update_footer (line 677) | def _update_footer(self) -> None:
method _move_selection (line 693) | def _move_selection(self, delta: int) -> None:
method action_move_up (line 744) | def action_move_up(self) -> None:
method action_move_down (line 748) | def action_move_down(self) -> None:
method action_tab_complete (line 752) | def action_tab_complete(self) -> None:
method _visible_page_size (line 761) | def _visible_page_size(self) -> int:
method action_page_up (line 786) | def action_page_up(self) -> None:
method action_page_down (line 796) | def action_page_down(self) -> None:
method action_select (line 807) | def action_select(self) -> None:
method action_set_default (line 825) | async def action_set_default(self) -> None:
method _restore_help_text (line 864) | def _restore_help_text(self) -> None:
method action_cancel (line 876) | def action_cancel(self) -> None:
FILE: libs/cli/deepagents_cli/widgets/status.py
class ModelLabel (line 27) | class ModelLabel(Widget):
method get_content_width (line 38) | def get_content_width(self, container: Size, viewport: Size) -> int: ...
method render (line 53) | def render(self) -> RenderResult:
class StatusBar (line 72) | class StatusBar(Horizontal):
method __init__ (line 170) | def __init__(self, cwd: str | Path | None = None, **kwargs: Any) -> None:
method compose (line 181) | def compose(self) -> ComposeResult: # noqa: PLR6301 — Textual widget ...
method on_resize (line 206) | def on_resize(self, event: events.Resize) -> None:
method on_mount (line 221) | def on_mount(self) -> None:
method watch_mode (line 231) | def watch_mode(self, mode: str) -> None:
method watch_auto_approve (line 249) | def watch_auto_approve(self, new_value: bool) -> None:
method watch_cwd (line 264) | def watch_cwd(self, new_value: str) -> None:
method watch_branch (line 272) | def watch_branch(self, new_value: str) -> None:
method watch_status_message (line 281) | def watch_status_message(self, new_value: str) -> None:
method _format_cwd (line 296) | def _format_cwd(self, cwd_path: str = "") -> str:
method set_mode (line 312) | def set_mode(self, mode: str) -> None:
method set_auto_approve (line 320) | def set_auto_approve(self, *, enabled: bool) -> None:
method set_status_message (line 328) | def set_status_message(self, message: str) -> None:
method watch_tokens (line 336) | def watch_tokens(self, new_value: int) -> None:
method set_tokens (line 352) | def set_tokens(self, count: int) -> None:
method hide_tokens (line 360) | def hide_tokens(self) -> None:
method set_model (line 364) | def set_model(self, *, provider: str, model: str) -> None:
FILE: libs/cli/deepagents_cli/widgets/thread_selector.py
function _apply_column_width (line 110) | def _apply_column_width(
function _active_sort_key (line 127) | def _active_sort_key(sort_by_updated: bool) -> str:
function _visible_column_keys (line 132) | def _visible_column_keys(columns: dict[str, bool]) -> list[str]:
function _collapse_whitespace (line 144) | def _collapse_whitespace(value: str) -> str:
function _truncate_value (line 156) | def _truncate_value(value: str, width: int | None) -> str:
function _format_column_value (line 180) | def _format_column_value(
function _format_header_label (line 227) | def _format_header_label(key: str) -> str:
function _header_cell_classes (line 232) | def _header_cell_classes(key: str, *, sort_key: str) -> str:
class ThreadOption (line 248) | class ThreadOption(Horizontal):
method __init__ (line 251) | def __init__(
class Clicked (line 285) | class Clicked(Message):
method __init__ (line 288) | def __init__(self, thread_id: str, index: int) -> None:
method compose (line 299) | def compose(self) -> ComposeResult:
method _cursor_text (line 322) | def _cursor_text(self) -> str:
method set_selected (line 326) | def set_selected(self, selected: bool) -> None:
method on_click (line 344) | def on_click(self, event: Click) -> None:
class DeleteThreadConfirmScreen (line 354) | class DeleteThreadConfirmScreen(ModalScreen[bool]):
method __init__ (line 387) | def __init__(self, thread_id: str) -> None:
method compose (line 396) | def compose(self) -> ComposeResult:
method action_confirm (line 415) | def action_confirm(self) -> None:
method action_cancel (line 419) | def action_cancel(self) -> None:
class ThreadSelectorScreen (line 424) | class ThreadSelectorScreen(ModalScreen[str | None]):
method __init__ (line 623) | def __init__(
method _switch_id (line 671) | def _switch_id(column_key: str) -> str:
method _switch_column_key (line 676) | def _switch_column_key(switch_id: str | None) -> str | None:
method _sync_selected_index (line 689) | def _sync_selected_index(self) -> None:
method _build_title (line 697) | def _build_title(self, thread_url: str | None = None) -> str | Content:
method _build_help_text (line 720) | def _build_help_text(self) -> str:
method _effective_thread_limit (line 743) | def _effective_thread_limit(self) -> int:
method _format_sort_toggle_label (line 751) | def _format_sort_toggle_label(self) -> str:
method _get_filter_input (line 756) | def _get_filter_input(self) -> Input:
method _filter_focus_order (line 762) | def _filter_focus_order(self) -> list[Input | Checkbox]:
method compose (line 780) | def compose(self) -> ComposeResult:
method on_mount (line 871) | async def on_mount(self) -> None:
method on_input_changed (line 894) | def on_input_changed(self, event: Input.Changed) -> None:
method on_input_submitted (line 903) | def on_input_submitted(self, event: Input.Submitted) -> None:
method on_key (line 912) | def on_key(self, event: Key) -> None:
method _collapse_search_selection (line 934) | def _collapse_search_selection(self) -> None:
method on_checkbox_changed (line 941) | def on_checkbox_changed(self, event: Checkbox.Changed) -> None:
method _update_filtered_list (line 995) | def _update_filtered_list(self) -> None:
method _compute_column_widths (line 1043) | def _compute_column_widths(self) -> dict[str, int | None]:
method _get_search_text (line 1086) | def _get_search_text(thread: ThreadInfo) -> str:
method _schedule_filter_and_rebuild (line 1109) | def _schedule_filter_and_rebuild(self) -> None:
method _filter_and_build (line 1117) | async def _filter_and_build(self) -> None:
method _compute_filtered (line 1135) | def _compute_filtered(
method _schedule_list_rebuild (line 1190) | def _schedule_list_rebuild(self) -> None:
method _pending_checkpoint_fields (line 1198) | def _pending_checkpoint_fields(self) -> tuple[bool, bool]:
method _populate_visible_checkpoint_details (line 1208) | async def _populate_visible_checkpoint_details(self) -> tuple[bool, bo...
method _schedule_checkpoint_enrichment (line 1227) | def _schedule_checkpoint_enrichment(self) -> None:
method _threads_match (line 1239) | def _threads_match(old: list[ThreadInfo], new: list[ThreadInfo]) -> bool:
method _load_threads (line 1258) | async def _load_threads(self) -> None:
method _load_checkpoint_details (line 1328) | async def _load_checkpoint_details(self) -> None:
method _refresh_cell_labels (line 1366) | def _refresh_cell_labels(self) -> None:
method _resolve_thread_url (line 1379) | def _resolve_thread_url(self) -> None:
method _fetch_thread_url (line 1385) | async def _fetch_thread_url(self) -> None:
method _show_mount_error (line 1418) | async def _show_mount_error(self, detail: str) -> None:
method _build_list (line 1445) | async def _build_list(self, *, recompute_widths: bool = True) -> None:
method _create_option_widgets (line 1479) | def _create_option_widgets(self) -> tuple[list[ThreadOption], ThreadOp...
method _scroll_selected_into_view (line 1514) | def _scroll_selected_into_view(self) -> None:
method _update_help_widgets (line 1530) | def _update_help_widgets(self) -> None:
method _schedule_header_rebuild (line 1546) | def _schedule_header_rebuild(self) -> None:
method _rebuild_header (line 1554) | async def _rebuild_header(self) -> None:
method _apply_sort (line 1576) | def _apply_sort(self) -> None:
method _move_selection (line 1583) | def _move_selection(self, delta: int) -> None:
method action_move_up (line 1606) | def action_move_up(self) -> None:
method action_move_down (line 1612) | def action_move_down(self) -> None:
method _visible_page_size (line 1618) | def _visible_page_size(self) -> int:
method action_page_up (line 1639) | def action_page_up(self) -> None:
method action_page_down (line 1649) | def action_page_down(self) -> None:
method action_select (line 1660) | def action_select(self) -> None:
method action_focus_next_filter (line 1668) | def action_focus_next_filter(self) -> None:
method action_focus_previous_filter (line 1681) | def action_focus_previous_filter(self) -> None:
method action_toggle_sort (line 1694) | def action_toggle_sort(self) -> None:
method _persist_sort_order (line 1708) | def _persist_sort_order(self, order: str) -> None:
method action_delete_thread (line 1720) | def action_delete_thread(self) -> None:
method is_delete_confirmation_open (line 1733) | def is_delete_confirmation_open(self) -> bool:
method _on_delete_confirmed (line 1737) | def _on_delete_confirmed(self, thread_id: str, confirmed: bool | None)...
method _handle_delete_confirm (line 1754) | async def _handle_delete_confirm(self, thread_id: str) -> None:
method on_click (line 1800) | def on_click(self, event: Click) -> None: # noqa: PLR6301 # Textual ...
method on_thread_option_clicked (line 1804) | def on_thread_option_clicked(self, event: ThreadOption.Clicked) -> None:
method action_cancel (line 1816) | def action_cancel(self) -> None:
FILE: libs/cli/deepagents_cli/widgets/tool_renderers.py
class ToolRenderer (line 18) | class ToolRenderer:
method get_approval_widget (line 22) | def get_approval_widget(
class WriteFileRenderer (line 36) | class WriteFileRenderer(ToolRenderer):
method get_approval_widget (line 40) | def get_approval_widget( # noqa: D102 # Protocol method — docstring ...
class EditFileRenderer (line 60) | class EditFileRenderer(ToolRenderer):
method get_approval_widget (line 64) | def get_approval_widget( # noqa: D102 # Protocol method — docstring ...
method _generate_diff (line 83) | def _generate_diff(old_string: str, new_string: str) -> list[str]:
function get_renderer (line 118) | def get_renderer(tool_name: str) -> ToolRenderer:
FILE: libs/cli/deepagents_cli/widgets/tool_widgets.py
class ToolApprovalWidget (line 21) | class ToolApprovalWidget(Vertical):
method __init__ (line 24) | def __init__(self, data: dict[str, Any]) -> None:
method compose (line 29) | def compose(self) -> ComposeResult: # noqa: PLR6301 # Textual widget...
class GenericApprovalWidget (line 38) | class GenericApprovalWidget(ToolApprovalWidget):
method compose (line 41) | def compose(self) -> ComposeResult:
class WriteFileApprovalWidget (line 59) | class WriteFileApprovalWidget(ToolApprovalWidget):
method compose (line 62) | def compose(self) -> ComposeResult:
class EditFileApprovalWidget (line 92) | class EditFileApprovalWidget(ToolApprovalWidget):
method compose (line 95) | def compose(self) -> ComposeResult:
method _count_stats (line 130) | def _count_stats(
method _format_stats (line 155) | def _format_stats(additions: int, deletions: int) -> Content:
method _render_diff_lines_only (line 172) | def _render_diff_lines_only(self, diff_lines: list[str]) -> ComposeRes...
method _render_strings_only (line 197) | def _render_strings_only(self, old_string: str, new_string: str) -> Co...
method _render_diff_line (line 213) | def _render_diff_line(line: str) -> Static | None:
method _render_string_lines (line 240) | def _render_string_lines(text: str, *, is_addition: bool) -> ComposeRe...
FILE: libs/cli/deepagents_cli/widgets/welcome.py
class WelcomeBanner (line 45) | class WelcomeBanner(Static):
method __init__ (line 62) | def __init__(
method on_mount (line 101) | def on_mount(self) -> None:
method _fetch_and_update (line 106) | async def _fetch_and_update(self) -> None:
method update_thread_id (line 121) | def update_thread_id(self, thread_id: str) -> None:
method set_connected (line 130) | def set_connected(self, mcp_tool_count: int = 0) -> None:
method set_failed (line 141) | def set_failed(self, error: str) -> None:
method on_click (line 152) | def on_click(self, event: Click) -> None: # noqa: PLR6301 # Textual ...
method _build_banner (line 156) | def _build_banner(self, project_url: str | None = None) -> Content:
function build_failure_footer (line 241) | def build_failure_footer(error: str) -> Content:
function build_connecting_footer (line 257) | def build_connecting_footer(
function build_welcome_footer (line 280) | def build_welcome_footer() -> Content:
FILE: libs/cli/examples/skills/arxiv-search/arxiv_search.py
function query_arxiv (line 10) | def query_arxiv(query: str, max_papers: int = 10) -> str:
function main (line 44) | def main() -> None:
FILE: libs/cli/examples/skills/skill-creator/scripts/init_skill.py
function title_case_skill_name (line 190) | def title_case_skill_name(skill_name):
function init_skill (line 199) | def init_skill(skill_name, path):
function main (line 278) | def main():
FILE: libs/cli/examples/skills/skill-creator/scripts/quick_validate.py
function validate_skill (line 20) | def validate_skill(skill_path):
FILE: libs/cli/tests/integration_tests/benchmarks/test_codspeed_import_benchmarks.py
function _evict_modules (line 47) | def _evict_modules() -> None:
function _clean_module_cache (line 58) | def _clean_module_cache() -> Iterator[None]:
class TestStartupPathBenchmarks (line 73) | class TestStartupPathBenchmarks:
method test_import_app (line 76) | def test_import_app(self, benchmark: BenchmarkFixture) -> None:
method test_import_main (line 85) | def test_import_main(self, benchmark: BenchmarkFixture) -> None:
method test_import_cli_context (line 94) | def test_import_cli_context(self, benchmark: BenchmarkFixture) -> None:
method test_import_ask_user_types (line 103) | def test_import_ask_user_types(self, benchmark: BenchmarkFixture) -> N...
method test_import_textual_adapter (line 112) | def test_import_textual_adapter(self, benchmark: BenchmarkFixture) -> ...
method test_import_tool_display (line 121) | def test_import_tool_display(self, benchmark: BenchmarkFixture) -> None:
method test_import_config (line 130) | def test_import_config(self, benchmark: BenchmarkFixture) -> None:
method test_import_ui (line 139) | def test_import_ui(self, benchmark: BenchmarkFixture) -> None:
method test_import_file_ops (line 148) | def test_import_file_ops(self, benchmark: BenchmarkFixture) -> None:
class TestRuntimePathBenchmarks (line 166) | class TestRuntimePathBenchmarks:
method test_import_configurable_model (line 169) | def test_import_configurable_model(self, benchmark: BenchmarkFixture) ...
method test_import_ask_user (line 178) | def test_import_ask_user(self, benchmark: BenchmarkFixture) -> None:
FILE: libs/cli/tests/integration_tests/benchmarks/test_startup_benchmarks.py
function _run_python (line 75) | def _run_python(code: str, *, timeout: int = 60) -> subprocess.Completed...
function _get_loaded_modules (line 94) | def _get_loaded_modules(import_statement: str) -> set[str]:
class TestImportIsolation (line 131) | class TestImportIsolation:
method test_no_heavy_imports_on_lightweight_path (line 166) | def test_no_heavy_imports_on_lightweight_path(self, import_stmt: str) ...
class TestCLIStartupTime (line 187) | class TestCLIStartupTime:
method _time_cli_command (line 196) | def _time_cli_command(args: str) -> float:
method test_help_under_threshold (line 221) | def test_help_under_threshold(self) -> None:
method test_version_under_threshold (line 230) | def test_version_under_threshold(self) -> None:
class TestImportTiming (line 246) | class TestImportTiming:
method test_module_import_time (line 270) | def test_module_import_time(self, module: str) -> None:
class TestDeferredImportsWork (line 298) | class TestDeferredImportsWork:
method test_agent_import_loads_langchain (line 306) | def test_agent_import_loads_langchain(self) -> None:
method test_sessions_import_available (line 314) | def test_sessions_import_available(self) -> None:
method test_configurable_model_middleware_loads_langchain (line 321) | def test_configurable_model_middleware_loads_langchain(self) -> None:
method test_ask_user_middleware_loads_langchain (line 331) | def test_ask_user_middleware_loads_langchain(self) -> None:
FILE: libs/cli/tests/integration_tests/conftest.py
function langsmith_client (line 11) | def langsmith_client() -> Generator[Client | None, None, None]:
function flush_langsmith_after_test (line 32) | def flush_langsmith_after_test(langsmith_client: Client) -> Generator[No...
FILE: libs/cli/tests/integration_tests/test_acp_mode.py
class _AcpSmokeClient (line 22) | class _AcpSmokeClient(Client):
method request_permission (line 23) | async def request_permission(self, *args: Any, **kwargs: Any) -> Any: ...
method write_text_file (line 27) | async def write_text_file(self, *args: Any, **kwargs: Any) -> Any: # ...
method read_text_file (line 31) | async def read_text_file(self, *args: Any, **kwargs: Any) -> Any: # n...
method create_terminal (line 35) | async def create_terminal(self, *args: Any, **kwargs: Any) -> Any: # ...
method terminal_output (line 39) | async def terminal_output(self, *args: Any, **kwargs: Any) -> Any: # ...
method release_terminal (line 43) | async def release_terminal(self, *args: Any, **kwargs: Any) -> Any: #...
method wait_for_terminal_exit (line 47) | async def wait_for_terminal_exit(self, *args: Any, **kwargs: Any) -> A...
method kill_terminal (line 51) | async def kill_terminal(self, *args: Any, **kwargs: Any) -> Any: # no...
method ext_method (line 55) | async def ext_method(self, method: str, params: dict) -> dict: # noqa...
method ext_notification (line 58) | async def ext_notification(self, method: str, params: dict) -> None: ...
function test_cli_acp_mode_starts_session_and_exits (line 62) | async def test_cli_acp_mode_starts_session_and_exits() -> None:
FILE: libs/cli/tests/integration_tests/test_compact_resume.py
function _write_model_config (line 15) | def _write_model_config(home_dir: Path) -> None:
function _build_long_prompt (line 29) | def _build_long_prompt(turn: int) -> str:
function _run_turn (line 38) | async def _run_turn(agent, *, thread_id: str, assistant_id: str, prompt:...
function _event_field (line 54) | def _event_field(event: object, key: str) -> object | None:
function test_compact_resumed_thread_uses_persisted_history (line 62) | async def test_compact_resumed_thread_uses_persisted_history(
FILE: libs/cli/tests/integration_tests/test_sandbox_factory.py
class BaseSandboxIntegrationTest (line 20) | class BaseSandboxIntegrationTest(ABC):
method sandbox (line 29) | def sandbox(self) -> Iterator[SandboxBackendProtocol]:
method test_sandbox_creation (line 33) | def test_sandbox_creation(self, sandbox: SandboxBackendProtocol) -> None:
method test_upload_single_file (line 39) | def test_upload_single_file(self, sandbox: SandboxBackendProtocol) -> ...
method test_download_single_file (line 53) | def test_download_single_file(self, sandbox: SandboxBackendProtocol) -...
method test_upload_download_roundtrip (line 68) | def test_upload_download_roundtrip(self, sandbox: SandboxBackendProtoc...
method test_upload_multiple_files (line 82) | def test_upload_multiple_files(self, sandbox: SandboxBackendProtocol) ...
method test_download_multiple_files (line 97) | def test_download_multiple_files(self, sandbox: SandboxBackendProtocol...
method test_download_nonexistent_file (line 119) | def test_download_nonexistent_file(self, sandbox: SandboxBackendProtoc...
method test_upload_binary_content (line 130) | def test_upload_binary_content(self, sandbox: SandboxBackendProtocol) ...
method test_partial_success_upload (line 145) | def test_partial_success_upload(self, sandbox: SandboxBackendProtocol)...
method test_partial_success_download (line 161) | def test_partial_success_download(self, sandbox: SandboxBackendProtoco...
method test_download_error_file_not_found (line 183) | def test_download_error_file_not_found(
method test_download_error_is_directory (line 199) | def test_download_error_is_directory(self, sandbox: SandboxBackendProt...
method test_upload_error_parent_not_found (line 216) | def test_upload_error_parent_not_found(
method test_upload_error_invalid_path (line 243) | def test_upload_error_invalid_path(self, sandbox: SandboxBackendProtoc...
method test_download_error_invalid_path (line 257) | def test_download_error_invalid_path(self, sandbox: SandboxBackendProt...
method test_upload_to_existing_directory_path (line 272) | def test_upload_to_existing_directory_path(
class TestRunLoopIntegration (line 291) | class TestRunLoopIntegration(BaseSandboxIntegrationTest):
method sandbox (line 295) | def sandbox(self) -> Iterator[BaseSandbox]:
class TestDaytonaIntegration (line 301) | class TestDaytonaIntegration(BaseSandboxIntegrationTest):
method sandbox (line 305) | def sandbox(self) -> Iterator[BaseSandbox]:
class TestModalIntegration (line 311) | class TestModalIntegration(BaseSandboxIntegrationTest):
method sandbox (line 315) | def sandbox(self) -> Iterator[BaseSandbox]:
class TestLangSmithIntegration (line 321) | class TestLangSmithIntegration(BaseSandboxIntegrationTest):
method sandbox (line 325) | def sandbox(self) -> Iterator[BaseSandbox]:
FILE: libs/cli/tests/integration_tests/test_sandbox_operations.py
class TestSandboxOperations (line 23) | class TestSandboxOperations:
method sandbox (line 27) | def sandbox(self) -> Iterator[SandboxBackendProtocol]:
method setup_test_dir (line 33) | def setup_test_dir(self, sandbox: SandboxBackendProtocol) -> None:
method test_write_new_file (line 41) | def test_write_new_file(self, sandbox: SandboxBackendProtocol) -> None:
method test_write_creates_parent_dirs (line 54) | def test_write_creates_parent_dirs(self, sandbox: SandboxBackendProtoc...
method test_write_existing_file_fails (line 66) | def test_write_existing_file_fails(self, sandbox: SandboxBackendProtoc...
method test_write_special_characters (line 81) | def test_write_special_characters(self, sandbox: SandboxBackendProtoco...
method test_write_empty_file (line 96) | def test_write_empty_file(self, sandbox: SandboxBackendProtocol) -> None:
method test_write_path_with_spaces (line 110) | def test_write_path_with_spaces(self, sandbox: SandboxBackendProtocol)...
method test_write_unicode_content (line 122) | def test_write_unicode_content(self, sandbox: SandboxBackendProtocol) ...
method test_write_consecutive_slashes_in_path (line 134) | def test_write_consecutive_slashes_in_path(
method test_write_very_long_content (line 148) | def test_write_very_long_content(self, sandbox: SandboxBackendProtocol...
method test_write_content_with_only_newlines (line 162) | def test_write_content_with_only_newlines(
method test_read_basic_file (line 177) | def test_read_basic_file(self, sandbox: SandboxBackendProtocol) -> None:
method test_read_nonexistent_file (line 192) | def test_read_nonexistent_file(self, sandbox: SandboxBackendProtocol) ...
method test_read_empty_file (line 201) | def test_read_empty_file(self, sandbox: SandboxBackendProtocol) -> None:
method test_read_with_offset (line 214) | def test_read_with_offset(self, sandbox: SandboxBackendProtocol) -> None:
method test_read_with_limit (line 229) | def test_read_with_limit(self, sandbox: SandboxBackendProtocol) -> None:
method test_read_with_offset_and_limit (line 245) | def test_read_with_offset_and_limit(self, sandbox: SandboxBackendProto...
method test_read_unicode_content (line 262) | def test_read_unicode_content(self, sandbox: SandboxBackendProtocol) -...
method test_read_file_with_very_long_lines (line 277) | def test_read_file_with_very_long_lines(
method test_read_with_zero_limit (line 295) | def test_read_with_zero_limit(self, sandbox: SandboxBackendProtocol) -...
method test_read_offset_beyond_file_length (line 311) | def test_read_offset_beyond_file_length(
method test_read_offset_at_exact_file_length (line 331) | def test_read_offset_at_exact_file_length(
method test_read_very_large_file_in_chunks (line 350) | def test_read_very_large_file_in_chunks(
method test_edit_single_occurrence (line 387) | def test_edit_single_occurrence(self, sandbox: SandboxBackendProtocol)...
method test_edit_multiple_occurrences_without_replace_all (line 405) | def test_edit_multiple_occurrences_without_replace_all(
method test_edit_multiple_occurrences_with_replace_all (line 425) | def test_edit_multiple_occurrences_with_replace_all(
method test_edit_string_not_found (line 445) | def test_edit_string_not_found(self, sandbox: SandboxBackendProtocol) ...
method test_edit_nonexistent_file (line 456) | def test_edit_nonexistent_file(self, sandbox: SandboxBackendProtocol) ...
method test_edit_special_characters (line 465) | def test_edit_special_characters(self, sandbox: SandboxBackendProtocol...
method test_edit_multiline_support (line 487) | def test_edit_multiline_support(self, sandbox: SandboxBackendProtocol)...
method test_edit_with_empty_new_string (line 507) | def test_edit_with_empty_new_string(self, sandbox: SandboxBackendProto...
method test_edit_identical_strings (line 525) | def test_edit_identical_strings(self, sandbox: SandboxBackendProtocol)...
method test_edit_unicode_content (line 542) | def test_edit_unicode_content(self, sandbox: SandboxBackendProtocol) -...
method test_edit_whitespace_only_strings (line 559) | def test_edit_whitespace_only_strings(
method test_edit_with_very_long_strings (line 577) | def test_edit_with_very_long_strings(self, sandbox: SandboxBackendProt...
method test_edit_line_ending_preservation (line 596) | def test_edit_line_ending_preservation(
method test_edit_partial_line_match (line 615) | def test_edit_partial_line_match(self, sandbox: SandboxBackendProtocol...
method test_ls_path_is_absolute (line 634) | def test_ls_path_is_absolute(self, sandbox: SandboxBackendProtocol) ->...
method test_ls_basic_directory (line 644) | def test_ls_basic_directory(self, sandbox: SandboxBackendProtocol) -> ...
method test_ls_empty_directory (line 667) | def test_ls_empty_directory(self, sandbox: SandboxBackendProtocol) -> ...
method test_ls_nonexistent_directory (line 676) | def test_ls_nonexistent_directory(self, sandbox: SandboxBackendProtoco...
method test_ls_hidden_files (line 684) | def test_ls_hidden_files(self, sandbox: SandboxBackendProtocol) -> None:
method test_ls_directory_with_spaces (line 698) | def test_ls_directory_with_spaces(self, sandbox: SandboxBackendProtoco...
method test_ls_unicode_filenames (line 712) | def test_ls_unicode_filenames(self, sandbox: SandboxBackendProtocol) -...
method test_ls_large_directory (line 726) | def test_ls_large_directory(self, sandbox: SandboxBackendProtocol) -> ...
method test_ls_path_with_trailing_slash (line 746) | def test_ls_path_with_trailing_slash(self, sandbox: SandboxBackendProt...
method test_ls_special_characters_in_filenames (line 760) | def test_ls_special_characters_in_filenames(
method test_grep_basic_search (line 781) | def test_grep_basic_search(self, sandbox: SandboxBackendProtocol) -> N...
method test_grep_with_glob_pattern (line 801) | def test_grep_with_glob_pattern(self, sandbox: SandboxBackendProtocol)...
method test_grep_no_matches (line 815) | def test_grep_no_matches(self, sandbox: SandboxBackendProtocol) -> None:
method test_grep_multiple_matches_per_file (line 826) | def test_grep_multiple_matches_per_file(
method test_grep_literal_string_matching (line 843) | def test_grep_literal_string_matching(
method test_grep_unicode_pattern (line 858) | def test_grep_unicode_pattern(self, sandbox: SandboxBackendProtocol) -...
method test_grep_case_sensitivity (line 870) | def test_grep_case_sensitivity(self, sandbox: SandboxBackendProtocol) ...
method test_grep_with_special_characters (line 883) | def test_grep_with_special_characters(
method test_grep_empty_directory (line 905) | def test_grep_empty_directory(self, sandbox: SandboxBackendProtocol) -...
method test_grep_across_nested_directories (line 915) | def test_grep_across_nested_directories(
method test_grep_with_multiline_matches (line 931) | def test_grep_with_multiline_matches(self, sandbox: SandboxBackendProt...
method test_glob_basic_pattern (line 946) | def test_glob_basic_pattern(self, sandbox: SandboxBackendProtocol) -> ...
method test_glob_recursive_pattern (line 963) | def test_glob_recursive_pattern(self, sandbox: SandboxBackendProtocol)...
method test_glob_no_matches (line 979) | def test_glob_no_matches(self, sandbox: SandboxBackendProtocol) -> None:
method test_glob_with_directories (line 990) | def test_glob_with_directories(self, sandbox: SandboxBackendProtocol) ...
method test_glob_specific_extension (line 1006) | def test_glob_specific_extension(self, sandbox: SandboxBackendProtocol...
method test_glob_hidden_files_explicitly (line 1020) | def test_glob_hidden_files_explicitly(
method test_glob_with_character_class (line 1039) | def test_glob_with_character_class(self, sandbox: SandboxBackendProtoc...
method test_glob_with_question_mark (line 1058) | def test_glob_with_question_mark(self, sandbox: SandboxBackendProtocol...
method test_glob_multiple_extensions (line 1074) | def test_glob_multiple_extensions(self, sandbox: SandboxBackendProtoco...
method test_glob_deeply_nested_pattern (line 1092) | def test_glob_deeply_nested_pattern(self, sandbox: SandboxBackendProto...
method test_glob_with_no_path_argument (line 1105) | def test_glob_with_no_path_argument(self, sandbox: SandboxBackendProto...
method test_write_read_edit_workflow (line 1119) | def test_write_read_edit_workflow(self, sandbox: SandboxBackendProtoco...
method test_complex_directory_operations (line 1146) | def test_complex_directory_operations(
FILE: libs/cli/tests/unit_tests/conftest.py
function _warm_model_caches (line 15) | def _warm_model_caches() -> None:
function _clear_langsmith_env (line 35) | def _clear_langsmith_env(monkeypatch: pytest.MonkeyPatch) -> None:
function _isolate_history (line 58) | def _isolate_history(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) ->...
FILE: libs/cli/tests/unit_tests/skills/test_commands.py
class TestValidateSkillName (line 26) | class TestValidateSkillName:
method test_valid_skill_names (line 29) | def test_valid_skill_names(self):
method test_invalid_names_per_spec (line 50) | def test_invalid_names_per_spec(self):
method test_path_traversal_attacks (line 65) | def test_path_traversal_attacks(self):
method test_absolute_paths (line 83) | def test_absolute_paths(self):
method test_path_separators (line 96) | def test_path_separators(self):
method test_invalid_characters (line 109) | def test_invalid_characters(self):
method test_unicode_lowercase_accepted (line 136) | def test_unicode_lowercase_accepted(self) -> None:
method test_unicode_uppercase_rejected (line 153) | def test_unicode_uppercase_rejected(self) -> None:
method test_cjk_rejected (line 164) | def test_cjk_rejected(self) -> None:
method test_emoji_rejected (line 175) | def test_emoji_rejected(self) -> None:
method test_empty_names (line 186) | def test_empty_names(self):
class TestValidateSkillPath (line 200) | class TestValidateSkillPath:
method test_valid_path_within_base (line 203) | def test_valid_path_within_base(self, tmp_path: Path) -> None:
method test_path_traversal_outside_base (line 213) | def test_path_traversal_outside_base(self, tmp_path: Path) -> None:
method test_symlink_path_traversal (line 224) | def test_symlink_path_traversal(self, tmp_path: Path) -> None:
method test_nonexistent_path_validation (line 244) | def test_nonexistent_path_validation(self, tmp_path: Path) -> None:
class TestIntegrationSecurity (line 256) | class TestIntegrationSecurity:
method test_combined_validation (line 259) | def test_combined_validation(self, tmp_path: Path) -> None:
class TestGenerateTemplate (line 288) | class TestGenerateTemplate:
method test_template_parseable_by_middleware (line 295) | def test_template_parseable_by_middleware(self):
method test_template_body_has_no_when_to_use_section (line 310) | def test_template_body_has_no_when_to_use_section(self):
method test_template_description_includes_trigger_guidance (line 322) | def test_template_description_includes_trigger_guidance(self):
function _make_skill (line 334) | def _make_skill(
class TestFormatInfoFields (line 369) | class TestFormatInfoFields:
method test_all_fields_present (line 372) | def test_all_fields_present(self) -> None:
method test_no_optional_fields (line 394) | def test_no_optional_fields(self) -> None:
method test_license_only (line 400) | def test_license_only(self) -> None:
method test_compatibility_only (line 407) | def test_compatibility_only(self) -> None:
method test_allowed_tools_only (line 414) | def test_allowed_tools_only(self) -> None:
method test_metadata_only (line 421) | def test_metadata_only(self) -> None:
method test_field_order (line 428) | def test_field_order(self) -> None:
class TestSkillsHelpFlag (line 446) | class TestSkillsHelpFlag:
method test_skills_help_shows_subcommands (line 449) | def test_skills_help_shows_subcommands(self) -> None:
method test_skills_list_help_shows_list_options (line 480) | def test_skills_list_help_shows_list_options(self) -> None:
class TestThreadsHelpFlag (line 504) | class TestThreadsHelpFlag:
method test_threads_help_shows_threads_content (line 507) | def test_threads_help_shows_threads_content(self) -> None:
class TestThreadsListAlias (line 536) | class TestThreadsListAlias:
method test_threads_ls_alias_parsed (line 539) | def test_threads_ls_alias_parsed(self) -> None:
method test_threads_list_still_works (line 546) | def test_threads_list_still_works(self) -> None:
class TestSkillsListAlias (line 554) | class TestSkillsListAlias:
method test_skills_ls_alias_parsed (line 557) | def test_skills_ls_alias_parsed(self) -> None:
method test_skills_list_still_works (line 564) | def test_skills_list_still_works(self) -> None:
class TestInfoShadowWarning (line 572) | class TestInfoShadowWarning:
method _make_skill_dir (line 575) | def _make_skill_dir(self, parent: Path, name: str, description: str) -...
method test_shadow_note_shown_when_project_overrides_user (line 589) | def test_shadow_note_shown_when_project_overrides_user(
method test_no_shadow_note_when_no_conflict (line 628) | def test_no_shadow_note_when_no_conflict(self, tmp_path: Path) -> None:
class TestInfoBuiltInSkill (line 666) | class TestInfoBuiltInSkill:
method _make_skill_dir (line 669) | def _make_skill_dir(self, parent: Path, name: str, description: str) -...
method test_built_in_skill_shows_correct_label (line 683) | def test_built_in_skill_shows_correct_label(self, tmp_path: Path) -> N...
method test_built_in_skill_no_shadow_warning (line 719) | def test_built_in_skill_no_shadow_warning(self, tmp_path: Path) -> None:
class TestListBuiltInSkillsDisplay (line 759) | class TestListBuiltInSkillsDisplay:
method _make_skill_dir (line 762) | def _make_skill_dir(self, parent: Path, name: str, description: str) -...
method test_built_in_section_rendered (line 770) | def test_built_in_section_rendered(self, tmp_path: Path) -> None:
method test_built_in_section_omits_path (line 806) | def test_built_in_section_omits_path(self, tmp_path: Path) -> None:
class TestSkillsLsDispatch (line 843) | class TestSkillsLsDispatch:
method test_ls_dispatches_to_list (line 846) | def test_ls_dispatches_to_list(self, tmp_path: Path) -> None:
class TestDeleteSkill (line 886) | class TestDeleteSkill:
method _create_test_skill (line 890) | def _create_test_skill(skills_dir: Path, skill_name: str) -> Path:
method test_delete_existing_skill_with_force (line 915) | def test_delete_existing_skill_with_force(self, tmp_path: Path) -> None:
method test_delete_nonexistent_skill (line 933) | def test_delete_nonexistent_skill(self, tmp_path: Path) -> None:
method test_delete_with_confirmation_accepted (line 961) | def test_delete_with_confirmation_accepted(
method test_delete_with_confirmation_no (line 982) | def test_delete_with_confirmation_no(self, tmp_path: Path) -> None:
method test_delete_with_confirmation_empty_input (line 1001) | def test_delete_with_confirmation_empty_input(self, tmp_path: Path) ->...
method test_delete_with_keyboard_interrupt (line 1020) | def test_delete_with_keyboard_interrupt(self, tmp_path: Path) -> None:
method test_delete_with_eof_error (line 1039) | def test_delete_with_eof_error(self, tmp_path: Path) -> None:
method test_delete_invalid_skill_name (line 1058) | def test_delete_invalid_skill_name(self, tmp_path: Path) -> None:
method test_delete_project_skill (line 1095) | def test_delete_project_skill(self, tmp_path: Path) -> None:
method test_delete_project_skill_not_in_project (line 1114) | def test_delete_project_skill_not_in_project(self, tmp_path: Path) -> ...
method test_delete_skill_with_supporting_files (line 1137) | def test_delete_skill_with_supporting_files(self, tmp_path: Path) -> N...
method test_delete_skill_for_specific_agent (line 1163) | def test_delete_skill_for_specific_agent(self, tmp_path: Path) -> None:
method test_delete_rmtree_os_error (line 1187) | def test_delete_rmtree_os_error(self, tmp_path: Path) -> None:
method test_delete_refuses_when_base_dir_none (line 1221) | def test_delete_refuses_when_base_dir_none(self, tmp_path: Path) -> None:
class TestDeleteArgparsing (line 1251) | class TestDeleteArgparsing:
method test_delete_args_parsed (line 1254) | def test_delete_args_parsed(self) -> None:
method test_delete_args_defaults (line 1267) | def test_delete_args_defaults(self) -> None:
method test_delete_help_shows_delete_options (line 1275) | def test_delete_help_shows_delete_options(self) -> None:
method test_execute_skills_command_dispatches_delete (line 1292) | def test_execute_skills_command_dispatches_delete(self, tmp_path: Path...
FILE: libs/cli/tests/unit_tests/skills/test_load.py
function _create_skill (line 11) | def _create_skill(skill_dir: Path, name: str, description: str) -> None:
class TestListSkillsSingleDirectory (line 28) | class TestListSkillsSingleDirectory:
method test_list_skills_empty_directory (line 31) | def test_list_skills_empty_directory(self, tmp_path: Path) -> None:
method test_list_skills_with_valid_skill (line 39) | def test_list_skills_with_valid_skill(self, tmp_path: Path) -> None:
method test_list_skills_source_parameter (line 65) | def test_list_skills_source_parameter(self, tmp_path: Path) -> None:
method test_list_skills_missing_frontmatter (line 87) | def test_list_skills_missing_frontmatter(self, tmp_path: Path) -> None:
method test_list_skills_missing_required_fields (line 101) | def test_list_skills_missing_required_fields(self, tmp_path: Path) -> ...
method test_list_skills_nonexistent_directory (line 127) | def test_list_skills_nonexistent_directory(self, tmp_path: Path) -> None:
class TestListSkillsMultipleDirectories (line 134) | class TestListSkillsMultipleDirectories:
method test_list_skills_user_only (line 137) | def test_list_skills_user_only(self, tmp_path: Path) -> None:
method test_list_skills_project_only (line 156) | def test_list_skills_project_only(self, tmp_path: Path) -> None:
method test_list_skills_both_sources (line 175) | def test_list_skills_both_sources(self, tmp_path: Path) -> None:
method test_list_skills_project_overrides_user (line 215) | def test_list_skills_project_overrides_user(self, tmp_path: Path) -> N...
method test_list_skills_empty_directories (line 250) | def test_list_skills_empty_directories(self, tmp_path: Path) -> None:
method test_list_skills_no_directories (line 260) | def test_list_skills_no_directories(self):
method test_list_skills_multiple_user_skills (line 265) | def test_list_skills_multiple_user_skills(self, tmp_path: Path) -> None:
method test_list_skills_mixed_valid_invalid (line 286) | def test_list_skills_mixed_valid_invalid(self, tmp_path: Path) -> None:
class TestListSkillsAliasDirectories (line 315) | class TestListSkillsAliasDirectories:
method test_user_agent_skills_dir_precedence (line 318) | def test_user_agent_skills_dir_precedence(self, tmp_path: Path) -> None:
method test_project_agent_skills_dir_precedence (line 347) | def test_project_agent_skills_dir_precedence(self, tmp_path: Path) -> ...
method test_full_precedence_chain (line 376) | def test_full_precedence_chain(self, tmp_path: Path) -> None:
method test_mixed_sources_with_aliases (line 417) | def test_mixed_sources_with_aliases(self, tmp_path: Path) -> None:
method test_alias_directories_only (line 468) | def test_alias_directories_only(self, tmp_path: Path) -> None:
method test_nonexistent_alias_directories (line 495) | def test_nonexistent_alias_directories(self, tmp_path: Path) -> None:
class TestListSkillsBuiltIn (line 510) | class TestListSkillsBuiltIn:
method test_built_in_skills_discovered (line 513) | def test_built_in_skills_discovered(self, tmp_path: Path) -> None:
method test_built_in_lowest_precedence (line 531) | def test_built_in_lowest_precedence(self, tmp_path: Path) -> None:
method test_project_overrides_built_in (line 557) | def test_project_overrides_built_in(self, tmp_path: Path) -> None:
method test_built_in_coexists_with_other_skills (line 583) | def test_built_in_coexists_with_other_skills(self, tmp_path: Path) -> ...
method test_nonexistent_built_in_dir (line 622) | def test_nonexistent_built_in_dir(self, tmp_path: Path) -> None:
method test_real_skill_creator_ships (line 633) | def test_real_skill_creator_ships(self) -> None:
method test_oserror_in_one_source_does_not_break_others (line 659) | def test_oserror_in_one_source_does_not_break_others(self, tmp_path: P...
FILE: libs/cli/tests/unit_tests/skills/test_skills_json.py
class TestSkillsListJson (line 11) | class TestSkillsListJson:
method test_json_output_with_skills (line 14) | def test_json_output_with_skills(self, tmp_path: Path) -> None:
method test_json_output_empty (line 43) | def test_json_output_empty(self, tmp_path: Path) -> None:
class TestSkillsInfoJson (line 63) | class TestSkillsInfoJson:
method test_json_output (line 66) | def test_json_output(self, tmp_path: Path) -> None:
class TestSkillsCreateJson (line 95) | class TestSkillsCreateJson:
method test_json_output (line 98) | def test_json_output(self, tmp_path: Path) -> None:
class TestSkillsDeleteJson (line 119) | class TestSkillsDeleteJson:
method test_json_output (line 122) | def test_json_output(self, tmp_path: Path) -> None:
FILE: libs/cli/tests/unit_tests/test_agent.py
function _make_fake_chat_model (line 32) | def _make_fake_chat_model() -> GenericFakeChatModel:
function test_format_write_file_description_create_new_file (line 39) | def test_format_write_file_description_create_new_file(tmp_path: Path) -...
function test_format_write_file_description_overwrite_existing_file (line 63) | def test_format_write_file_description_overwrite_existing_file(tmp_path:...
function test_format_edit_file_description_single_occurrence (line 89) | def test_format_edit_file_description_single_occurrence():
function test_format_edit_file_description_all_occurrences (line 113) | def test_format_edit_file_description_all_occurrences():
function test_format_web_search_description (line 137) | def test_format_web_search_description():
function test_format_web_search_description_default_max_results (line 160) | def test_format_web_search_description_default_max_results():
function test_format_fetch_url_description (line 181) | def test_format_fetch_url_description():
function test_format_fetch_url_description_default_timeout (line 205) | def test_format_fetch_url_description_default_timeout():
function test_format_task_description (line 226) | def test_format_task_description():
function test_format_task_description_truncates_long_description (line 254) | def test_format_task_description_truncates_long_description():
function test_format_execute_description (line 279) | def test_format_execute_description():
function test_format_execute_description_with_hidden_unicode (line 300) | def test_format_execute_description_with_hidden_unicode():
function test_format_fetch_url_description_with_suspicious_url (line 319) | def test_format_fetch_url_description_with_suspicious_url():
function test_format_fetch_url_description_with_hidden_unicode_in_url (line 335) | def test_format_fetch_url_description_with_hidden_unicode_in_url():
class TestGetSystemPromptModelIdentity (line 352) | class TestGetSystemPromptModelIdentity:
method test_includes_model_identity_when_all_settings_present (line 355) | def test_includes_model_identity_when_all_settings_present(self) -> None:
method test_excludes_model_identity_when_model_name_is_none (line 370) | def test_excludes_model_identity_when_model_name_is_none(self) -> None:
method test_excludes_provider_when_not_set (line 382) | def test_excludes_provider_when_not_set(self) -> None:
method test_excludes_context_limit_when_not_set (line 397) | def test_excludes_context_limit_when_not_set(self) -> None:
method test_model_identity_with_only_model_name (line 412) | def test_model_identity_with_only_model_name(self) -> None:
class TestGetSystemPromptNonInteractive (line 428) | class TestGetSystemPromptNonInteractive:
method test_interactive_prompt_mentions_interactive_cli (line 431) | def test_interactive_prompt_mentions_interactive_cli(self) -> None:
method test_non_interactive_prompt_mentions_headless (line 441) | def test_non_interactive_prompt_mentions_headless(self) -> None:
method test_non_interactive_prompt_does_not_ask_questions (line 451) | def test_non_interactive_prompt_does_not_ask_questions(self) -> None:
method test_non_interactive_prompt_instructs_autonomous_execution (line 460) | def test_non_interactive_prompt_instructs_autonomous_execution(self) -...
method test_non_interactive_prompt_requires_non_interactive_commands (line 470) | def test_non_interactive_prompt_requires_non_interactive_commands(self...
method test_default_is_interactive (line 480) | def test_default_is_interactive(self) -> None:
class TestGetSystemPromptCwdOSError (line 490) | class TestGetSystemPromptCwdOSError:
method test_falls_back_on_cwd_oserror (line 493) | def test_falls_back_on_cwd_oserror(self) -> None:
class TestGetSystemPromptPlaceholderValidation (line 507) | class TestGetSystemPromptPlaceholderValidation:
method test_no_unreplaced_placeholders_in_interactive (line 510) | def test_no_unreplaced_placeholders_in_interactive(self) -> None:
method test_no_unreplaced_placeholders_in_non_interactive (line 522) | def test_no_unreplaced_placeholders_in_non_interactive(self) -> None:
class TestCreateCliAgentInteractiveForwarding (line 534) | class TestCreateCliAgentInteractiveForwarding:
method test_forwards_interactive_false_to_get_system_prompt (line 537) | def test_forwards_interactive_false_to_get_system_prompt(
method test_explicit_system_prompt_ignores_interactive (line 591) | def test_explicit_system_prompt_ignores_interactive(self, tmp_path: Pa...
class TestDefaultAgentName (line 643) | class TestDefaultAgentName:
method test_default_agent_name_value (line 646) | def test_default_agent_name_value(self) -> None:
class TestListAgents (line 655) | class TestListAgents:
method test_default_agent_marked (line 658) | def test_default_agent_marked(self, tmp_path: Path) -> None:
method test_non_default_agent_not_marked (line 700) | def test_non_default_agent_not_marked(self, tmp_path: Path) -> None:
class TestListAgentsJson (line 729) | class TestListAgentsJson:
method test_json_output_with_agents (line 732) | def test_json_output_with_agents(self, tmp_path: Path) -> None:
method test_json_output_empty (line 771) | def test_json_output_empty(self, tmp_path: Path) -> None:
class TestResetAgentJson (line 793) | class TestResetAgentJson:
method test_json_output_default_reset (line 796) | def test_json_output_default_reset(self, tmp_path: Path) -> None:
class TestCreateCliAgentSkillsSources (line 823) | class TestCreateCliAgentSkillsSources:
method test_skills_source_precedence_order (line 826) | def test_skills_source_precedence_order(self, tmp_path: Path) -> None:
class TestCreateCliAgentMemorySources (line 903) | class TestCreateCliAgentMemorySources:
method test_project_agent_md_paths_in_memory_sources (line 906) | def test_project_agent_md_paths_in_memory_sources(self, tmp_path: Path...
method test_empty_project_paths_no_extra_sources (line 978) | def test_empty_project_paths_no_extra_sources(self, tmp_path: Path) ->...
class TestCreateCliAgentProjectContext (line 1041) | class TestCreateCliAgentProjectContext:
method test_project_context_drives_project_skills_and_subagents (line 1044) | def test_project_context_drives_project_skills_and_subagents(
method test_project_context_drives_project_agents_md_paths (line 1126) | def test_project_context_drives_project_agents_md_paths(
method test_project_context_sets_local_shell_root_dir (line 1199) | def test_project_context_sets_local_shell_root_dir(self, tmp_path: Pat...
method test_cwd_sets_local_filesystem_root_dir_without_shell (line 1256) | def test_cwd_sets_local_filesystem_root_dir_without_shell(
class TestMiddlewareStackConformance (line 1308) | class TestMiddlewareStackConformance:
method test_all_middleware_inherit_agent_middleware (line 1311) | def test_all_middleware_inherit_agent_middleware(self, tmp_path: Path)...
class TestLoadAsyncSubagents (line 1378) | class TestLoadAsyncSubagents:
method test_returns_empty_when_no_file (line 1379) | def test_returns_empty_when_no_file(self, tmp_path: Path) -> None:
method test_returns_empty_when_no_section (line 1383) | def test_returns_empty_when_no_section(self, tmp_path: Path) -> None:
method test_loads_valid_async_subagent (line 1389) | def test_loads_valid_async_subagent(self, tmp_path: Path) -> None:
method test_loads_multiple_subagents (line 1404) | def test_loads_multiple_subagents(self, tmp_path: Path) -> None:
method test_skips_entry_missing_required_fields (line 1422) | def test_skips_entry_missing_required_fields(self, tmp_path: Path) -> ...
method test_includes_optional_headers (line 1430) | def test_includes_optional_headers(self, tmp_path: Path) -> None:
method test_handles_invalid_toml (line 1445) | def test_handles_invalid_toml(self, tmp_path: Path) -> None:
class TestLsEntriesShim (line 1452) | class TestLsEntriesShim:
method test_remove_ls_entries_shim_when_sdk_pin_is_bumped (line 1460) | def test_remove_ls_entries_shim_when_sdk_pin_is_bumped(self) -> None:
FILE: libs/cli/tests/unit_tests/test_app.py
class TestInitialPromptOnMount (line 46) | class TestInitialPromptOnMount:
method test_initial_prompt_triggers_handle_user_message (line 49) | async def test_initial_prompt_triggers_handle_user_message(self) -> None:
class TestAppCSSValidation (line 73) | class TestAppCSSValidation:
method test_app_css_validates_on_mount (line 76) | async def test_app_css_validates_on_mount(self) -> None:
class TestThreadCachePrewarm (line 90) | class TestThreadCachePrewarm:
method test_prewarm_uses_current_thread_limit (line 93) | async def test_prewarm_uses_current_thread_limit(self) -> None:
method test_show_thread_selector_uses_cached_rows (line 108) | async def test_show_thread_selector_uses_cached_rows(self) -> None:
class TestAppBindings (line 146) | class TestAppBindings:
method test_ctrl_c_binding_has_priority (line 149) | def test_ctrl_c_binding_has_priority(self) -> None:
method test_toggle_tool_output_has_ctrl_o_binding (line 159) | def test_toggle_tool_output_has_ctrl_o_binding(self) -> None:
method test_ctrl_e_not_bound (line 169) | def test_ctrl_e_not_bound(self) -> None:
class TestITerm2CursorGuide (line 176) | class TestITerm2CursorGuide:
method test_escape_sequences_are_valid (line 179) | def test_escape_sequences_are_valid(self) -> None:
method test_write_iterm_escape_does_nothing_when_not_iterm (line 192) | def test_write_iterm_escape_does_nothing_when_not_iterm(self) -> None:
method test_write_iterm_escape_writes_sequence_when_iterm (line 202) | def test_write_iterm_escape_writes_sequence_when_iterm(self) -> None:
method test_write_iterm_escape_handles_oserror_gracefully (line 212) | def test_write_iterm_escape_handles_oserror_gracefully(self) -> None:
method test_write_iterm_escape_handles_none_stderr (line 222) | def test_write_iterm_escape_handles_none_stderr(self) -> None:
class TestITerm2Detection (line 231) | class TestITerm2Detection:
method test_detection_requires_tty (line 234) | def test_detection_requires_tty(self) -> None:
method test_detection_via_lc_terminal (line 253) | def test_detection_via_lc_terminal(self) -> None:
method test_detection_via_term_program (line 271) | def test_detection_via_term_program(self) -> None:
class TestModalScreenEscapeDismissal (line 289) | class TestModalScreenEscapeDismissal:
method test_escape_dismisses_modal_screen (line 293) | async def test_escape_dismisses_modal_screen() -> None:
class TestModalScreenCtrlDHandling (line 352) | class TestModalScreenCtrlDHandling:
method test_ctrl_d_deletes_in_thread_selector_instead_of_quitting (line 355) | async def test_ctrl_d_deletes_in_thread_selector_instead_of_quitting(s...
method test_escape_closes_thread_delete_confirm_without_dismissing_modal (line 392) | async def test_escape_closes_thread_delete_confirm_without_dismissing_...
method test_ctrl_d_twice_quits_from_delete_confirmation (line 435) | async def test_ctrl_d_twice_quits_from_delete_confirmation(self) -> None:
method test_ctrl_c_still_works_from_delete_confirmation (line 489) | async def test_ctrl_c_still_works_from_delete_confirmation(self) -> None:
method test_ctrl_d_quits_from_model_selector_with_input_focused (line 541) | async def test_ctrl_d_quits_from_model_selector_with_input_focused(
method test_ctrl_d_quits_from_mcp_viewer (line 567) | async def test_ctrl_d_quits_from_mcp_viewer(self) -> None:
class TestModalScreenShiftTabHandling (line 600) | class TestModalScreenShiftTabHandling:
method test_shift_tab_moves_backward_in_thread_selector (line 603) | async def test_shift_tab_moves_backward_in_thread_selector(self) -> None:
class TestModalScreenCtrlCHandling (line 641) | class TestModalScreenCtrlCHandling:
method test_ctrl_c_quits_from_thread_selector_with_input_focused (line 644) | async def test_ctrl_c_quits_from_thread_selector_with_input_focused(
method test_ctrl_c_quits_from_model_selector_with_input_focused (line 689) | async def test_ctrl_c_quits_from_model_selector_with_input_focused(
method test_ctrl_c_quits_from_mcp_viewer (line 726) | async def test_ctrl_c_quits_from_mcp_viewer(self) -> None:
class TestMountMessageNoMatches (line 770) | class TestMountMessageNoMatches:
method test_mount_message_no_crash_when_messages_missing (line 778) | async def test_mount_message_no_crash_when_messages_missing(self) -> N...
method test_mount_error_message_no_crash_when_messages_missing (line 799) | async def test_mount_error_message_no_crash_when_messages_missing(
class TestQueuedMessage (line 819) | class TestQueuedMessage:
method test_frozen (line 822) | def test_frozen(self) -> None:
method test_fields (line 828) | def test_fields(self) -> None:
class TestMessageQueue (line 835) | class TestMessageQueue:
method test_message_queued_when_agent_running (line 838) | async def test_message_queued_when_agent_running(self) -> None:
method test_message_queued_while_connecting (line 852) | async def test_message_queued_while_connecting(self) -> None:
method test_message_blocked_while_thread_switching (line 867) | async def test_message_blocked_while_thread_switching(self) -> None:
method test_queued_widget_mounted (line 886) | async def test_queued_widget_mounted(self) -> None:
method test_immediate_processing_when_agent_idle (line 900) | async def test_immediate_processing_when_agent_idle(self) -> None:
method test_fifo_order (line 916) | async def test_fifo_order(self) -> None:
method test_escape_pops_last_queued_message (line 932) | async def test_escape_pops_last_queued_message(self) -> None:
method test_escape_restores_text_to_empty_input (line 963) | async def test_escape_restores_text_to_empty_input(self) -> None:
method test_escape_preserves_existing_input_text (line 982) | async def test_escape_preserves_existing_input_text(self) -> None:
method test_escape_pop_shows_toast (line 1002) | async def test_escape_pop_shows_toast(self) -> None:
method test_escape_pop_single_then_interrupt (line 1030) | async def test_escape_pop_single_then_interrupt(self) -> None:
method test_escape_pop_handles_widget_desync (line 1049) | async def test_escape_pop_handles_widget_desync(self) -> None:
method test_interrupt_dismisses_completion_without_stopping_agent (line 1065) | async def test_interrupt_dismisses_completion_without_stopping_agent(s...
method test_interrupt_falls_through_when_no_completion (line 1089) | async def test_interrupt_falls_through_when_no_completion(self) -> None:
method test_queue_cleared_on_ctrl_c (line 1107) | async def test_queue_cleared_on_ctrl_c(self) -> None:
method test_process_next_from_queue_removes_widget (line 1124) | async def test_process_next_from_queue_removes_widget(self) -> None:
method test_shell_command_continues_chain (line 1142) | async def test_shell_command_continues_chain(self) -> None:
class TestAskUserLifecycle (line 1164) | class TestAskUserLifecycle:
method test_request_ask_user_timeout_cleans_old_widget (line 1167) | async def test_request_ask_user_timeout_cleans_old_widget(self) -> None:
method test_on_ask_user_menu_answered_ignores_remove_errors (line 1185) | async def test_on_ask_user_menu_answered_ignores_remove_errors(self) -...
method test_on_ask_user_menu_cancelled_ignores_remove_errors (line 1201) | async def test_on_ask_user_menu_cancelled_ignores_remove_errors(self) ...
class TestTraceCommand (line 1218) | class TestTraceCommand:
method test_trace_opens_browser_when_configured (line 1221) | async def test_trace_opens_browser_when_configured(self) -> None:
method test_trace_shows_error_when_not_configured (line 1248) | async def test_trace_shows_error_when_not_configured(self) -> None:
method test_trace_shows_error_when_no_session (line 1265) | async def test_trace_shows_error_when_no_session(self) -> None:
method test_trace_shows_link_when_browser_fails (line 1278) | async def test_trace_shows_link_when_browser_fails(self) -> None:
method test_trace_shows_error_when_url_build_raises (line 1304) | async def test_trace_shows_error_when_url_build_raises(self) -> None:
method test_trace_routed_from_handle_command (line 1321) | async def test_trace_routed_from_handle_command(self) -> None:
class TestRunAgentTaskMediaTracker (line 1335) | class TestRunAgentTaskMediaTracker:
method test_run_agent_task_passes_image_tracker (line 1338) | async def test_run_agent_task_passes_image_tracker(self) -> None:
method test_run_agent_task_finalizes_pending_tools_on_error (line 1356) | async def test_run_agent_task_finalizes_pending_tools_on_error(self) -...
class TestAppFocusRestoresChatInput (line 1381) | class TestAppFocusRestoresChatInput:
method test_app_focus_restores_chat_input (line 1384) | async def test_app_focus_restores_chat_input(self) -> None:
method test_app_focus_skips_when_modal_open (line 1402) | async def test_app_focus_skips_when_modal_open(self) -> None:
method test_app_focus_skips_when_approval_pending (line 1423) | async def test_app_focus_skips_when_approval_pending(self) -> None:
class TestPasteRouting (line 1439) | class TestPasteRouting:
method test_on_paste_routes_unfocused_event_to_chat_input (line 1442) | async def test_on_paste_routes_unfocused_event_to_chat_input(self) -> ...
method test_on_paste_does_not_route_when_input_already_focused (line 1464) | async def test_on_paste_does_not_route_when_input_already_focused(self...
class TestShellCommandInterrupt (line 1487) | class TestShellCommandInterrupt:
method test_escape_cancels_shell_worker (line 1490) | async def test_escape_cancels_shell_worker(self) -> None:
method test_ctrl_c_cancels_shell_worker (line 1505) | async def test_ctrl_c_cancels_shell_worker(self) -> None:
method test_process_killed_on_cancelled_error (line 1524) | async def test_process_killed_on_cancelled_error(self) -> None:
method test_cleanup_clears_state (line 1549) | async def test_cleanup_clears_state(self) -> None:
method test_messages_queued_during_shell (line 1566) | async def test_messages_queued_during_shell(self) -> None:
method test_queue_drains_after_shell_completes (line 1579) | async def test_queue_drains_after_shell_completes(self) -> None:
method test_interrupted_shows_message (line 1602) | async def test_interrupted_shows_message(self) -> None:
method test_timeout_kills_and_shows_error (line 1623) | async def test_timeout_kills_and_shows_error(self) -> None:
method test_posix_killpg_called (line 1650) | async def test_posix_killpg_called(self) -> None:
method test_sigkill_escalation (line 1673) | async def test_sigkill_escalation(self) -> None:
method test_no_op_when_no_shell_running (line 1699) | async def test_no_op_when_no_shell_running(self) -> None:
method test_oserror_shows_error_message (line 1710) | async def test_oserror_shows_error_message(self) -> None:
method test_handle_shell_command_sets_running_state (line 1727) | async def test_handle_shell_command_sets_running_state(self) -> None:
method test_kill_noop_when_already_exited (line 1744) | async def test_kill_noop_when_already_exited(self) -> None:
method test_end_to_end_escape_during_shell (line 1761) | async def test_end_to_end_escape_during_shell(self) -> None:
class TestInterruptApprovalPriority (line 1778) | class TestInterruptApprovalPriority:
method test_escape_rejects_approval_before_canceling_worker (line 1781) | async def test_escape_rejects_approval_before_canceling_worker(self) -...
method test_escape_pops_queue_before_cancelling_worker (line 1799) | async def test_escape_pops_queue_before_cancelling_worker(self) -> None:
method test_escape_rejects_approval_when_no_worker (line 1835) | async def test_escape_rejects_approval_when_no_worker(self) -> None:
method test_ctrl_c_rejects_approval_before_canceling_worker (line 1851) | async def test_ctrl_c_rejects_approval_before_canceling_worker(self) -...
class TestIsUserTyping (line 1871) | class TestIsUserTyping:
method test_returns_false_when_never_typed (line 1874) | def test_returns_false_when_never_typed(self) -> None:
method test_returns_true_within_threshold (line 1879) | def test_returns_true_within_threshold(self) -> None:
method test_returns_false_after_threshold (line 1885) | def test_returns_false_after_threshold(self) -> None:
method test_boundary_just_within_threshold (line 1891) | def test_boundary_just_within_threshold(self) -> None:
class TestRequestApprovalBranching (line 1898) | class TestRequestApprovalBranching:
method test_placeholder_mounted_when_typing (line 1901) | async def test_placeholder_mounted_when_typing(self) -> None:
method test_placeholder_mount_failure_falls_back_to_menu (line 1950) | async def test_placeholder_mount_failure_falls_back_to_menu(self) -> N...
method test_menu_mounted_directly_when_not_typing (line 1993) | async def test_menu_mounted_directly_when_not_typing(self) -> None:
class TestDeferredShowApproval (line 2028) | class TestDeferredShowApproval:
method test_swaps_placeholder_for_menu_after_idle (line 2031) | async def test_swaps_placeholder_for_menu_after_idle(self) -> None:
method test_bails_if_placeholder_detached_and_cancels_future (line 2075) | async def test_bails_if_placeholder_detached_and_cancels_future(self) ...
method test_timeout_shows_approval_after_deadline (line 2108) | async def test_timeout_shows_approval_after_deadline(self) -> None:
class TestOnChatInputTyping (line 2153) | class TestOnChatInputTyping:
method test_sets_last_typed_at (line 2156) | def test_sets_last_typed_at(self) -> None:
method test_updates_on_subsequent_calls (line 2169) | def test_updates_on_subsequent_calls(self) -> None:
class TestOnApprovalMenuDecidedCleanup (line 2185) | class TestOnApprovalMenuDecidedCleanup:
method test_removes_attached_placeholder (line 2188) | async def test_removes_attached_placeholder(self) -> None:
method test_nulls_detached_placeholder (line 2211) | async def test_nulls_detached_placeholder(self) -> None:
method test_no_placeholder_works_normally (line 2227) | async def test_no_placeholder_works_normally(self) -> None:
class TestActionOpenEditor (line 2240) | class TestActionOpenEditor:
method test_updates_text_on_successful_edit (line 2243) | async def test_updates_text_on_successful_edit(self) -> None:
method test_no_update_when_editor_returns_none (line 2260) | async def test_no_update_when_editor_returns_none(self) -> None:
method test_early_return_when_chat_input_is_none (line 2277) | async def test_early_return_when_chat_input_is_none(self) -> None:
method test_early_return_when_text_area_is_none (line 2284) | async def test_early_return_when_text_area_is_none(self) -> None:
method test_notifies_on_exception (line 2292) | async def test_notifies_on_exception(self) -> None:
class TestEditorSlashCommand (line 2315) | class TestEditorSlashCommand:
method test_editor_command_calls_action (line 2318) | async def test_editor_command_calls_action(self) -> None:
class TestFetchThreadHistoryData (line 2326) | class TestFetchThreadHistoryData:
method test_dict_messages_converted_to_message_objects (line 2329) | async def test_dict_messages_converted_to_message_objects(self) -> None:
method test_server_mode_falls_back_to_checkpointer (line 2360) | async def test_server_mode_falls_back_to_checkpointer(self) -> None:
class TestRemoteAgent (line 2396) | class TestRemoteAgent:
method test_returns_instance_with_remote_agent (line 2399) | def test_returns_instance_with_remote_agent(self) -> None:
method test_none_when_agent_is_none (line 2407) | def test_none_when_agent_is_none(self) -> None:
method test_none_with_non_remote_agent (line 2411) | def test_none_with_non_remote_agent(self) -> None:
method test_none_with_mock_spec_pregel (line 2417) | def test_none_with_mock_spec_pregel(self) -> None:
class TestSlashCommandBypass (line 2424) | class TestSlashCommandBypass:
method test_quit_bypasses_queue_when_agent_running (line 2427) | async def test_quit_bypasses_queue_when_agent_running(self) -> None:
method test_quit_bypasses_queue_when_connecting (line 2441) | async def test_quit_bypasses_queue_when_connecting(self) -> None:
method test_quit_bypasses_thread_switching (line 2455) | async def test_quit_bypasses_thread_switching(self) -> None:
method test_q_alias_bypasses_queue (line 2468) | async def test_q_alias_bypasses_queue(self) -> None:
method test_version_executes_during_connecting (line 2482) | async def test_version_executes_during_connecting(self) -> None:
method test_version_queues_during_agent_running (line 2496) | async def test_version_queues_during_agent_running(self) -> None:
method test_model_no_args_opens_selector_during_agent_running (line 2509) | async def test_model_no_args_opens_selector_during_agent_running(self)...
method test_model_no_args_opens_selector_during_connecting (line 2523) | async def test_model_no_args_opens_selector_during_connecting(self) ->...
method test_model_with_args_still_queues (line 2536) | async def test_model_with_args_still_queues(self) -> None:
method test_threads_opens_selector_during_agent_running (line 2549) | async def test_threads_opens_selector_during_agent_running(self) -> None:
method test_threads_opens_selector_during_connecting (line 2563) | async def test_threads_opens_selector_during_connecting(self) -> None:
method test_threads_blocked_during_thread_switching (line 2576) | async def test_threads_blocked_during_thread_switching(self) -> None:
method test_model_blocked_during_thread_switching (line 2590) | async def test_model_blocked_during_thread_switching(self) -> None:
class TestBypassFrozensetDrift (line 2605) | class TestBypassFrozensetDrift:
method _handled_commands (line 2615) | def _handled_commands() -> set[str]:
method test_all_bypass_commands_are_handled (line 2632) | def test_all_bypass_commands_are_handled(self) -> None:
method test_all_handled_commands_are_classified (line 2651) | def test_all_handled_commands_are_classified(self) -> None:
class TestDeferredActions (line 2663) | class TestDeferredActions:
method test_deferred_actions_drain_after_agent_cleanup (line 2666) | async def test_deferred_actions_drain_after_agent_cleanup(self) -> None:
method test_deferred_actions_drain_after_shell_cleanup (line 2688) | async def test_deferred_actions_drain_after_shell_cleanup(self) -> None:
method test_deferred_actions_not_drained_while_connecting (line 2709) | async def test_deferred_actions_not_drained_while_connecting(self) -> ...
method test_deferred_actions_cleared_on_interrupt (line 2731) | async def test_deferred_actions_cleared_on_interrupt(self) -> None:
method test_deferred_actions_cleared_on_server_failure (line 2747) | async def test_deferred_actions_cleared_on_server_failure(self) -> None:
method test_failing_deferred_action_does_not_block_others (line 2767) | async def test_failing_deferred_action_does_not_block_others(self) -> ...
method test_defer_action_deduplicates_by_kind (line 2794) | async def test_defer_action_deduplicates_by_kind(self) -> None:
method test_can_bypass_queue_version_only_connecting (line 2815) | async def test_can_bypass_queue_version_only_connecting(self) -> None:
method test_can_bypass_queue_bare_model_bypasses (line 2841) | async def test_can_bypass_queue_bare_model_bypasses(self) -> None:
method test_can_bypass_queue_model_with_args_no_bypass (line 2849) | async def test_can_bypass_queue_model_with_args_no_bypass(self) -> None:
method test_model_with_args_still_queues (line 2857) | async def test_model_with_args_still_queues(self) -> None:
method test_side_effect_free_bypasses_queue (line 2870) | async def test_side_effect_free_bypasses_queue(self) -> None:
method test_queued_commands_do_not_bypass (line 2878) | async def test_queued_commands_do_not_bypass(self) -> None:
method test_can_bypass_queue_empty_string (line 2886) | async def test_can_bypass_queue_empty_string(self) -> None:
method test_defer_action_mixed_kinds_preserves_ordering (line 2893) | async def test_defer_action_mixed_kinds_preserves_ordering(self) -> None:
FILE: libs/cli/tests/unit_tests/test_approval.py
class TestCheckExpandableCommand (line 14) | class TestCheckExpandableCommand:
method test_shell_command_over_threshold_is_expandable (line 17) | def test_shell_command_over_threshold_is_expandable(self) -> None:
method test_shell_command_at_threshold_not_expandable (line 23) | def test_shell_command_at_threshold_not_expandable(self) -> None:
method test_shell_command_under_threshold_not_expandable (line 29) | def test_shell_command_under_threshold_not_expandable(self) -> None:
method test_execute_tool_is_expandable (line 34) | def test_execute_tool_is_expandable(self) -> None:
method test_non_shell_tool_not_expandable (line 40) | def test_non_shell_tool_not_expandable(self) -> None:
method test_multiple_requests_not_expandable (line 46) | def test_multiple_requests_not_expandable(self) -> None:
method test_missing_command_arg_not_expandable (line 57) | def test_missing_command_arg_not_expandable(self) -> None:
class TestGetCommandDisplay (line 63) | class TestGetCommandDisplay:
method test_short_command_shows_full (line 66) | def test_short_command_shows_full(self) -> None:
method test_long_command_truncated_when_not_expanded (line 73) | def test_long_command_truncated_when_not_expanded(self) -> None:
method test_long_command_shows_full_when_expanded (line 83) | def test_long_command_shows_full_when_expanded(self) -> None:
method test_short_command_shows_full_even_when_expanded_true (line 92) | def test_short_command_shows_full_even_when_expanded_true(self) -> None:
method test_command_at_boundary_plus_one_is_expandable (line 100) | def test_command_at_boundary_plus_one_is_expandable(self) -> None:
method test_none_command_value_handled (line 109) | def test_none_command_value_handled(self) -> None:
method test_integer_command_value_handled (line 116) | def test_integer_command_value_handled(self) -> None:
method test_command_display_escapes_markup_tags (line 123) | def test_command_display_escapes_markup_tags(self) -> None:
method test_command_display_with_hidden_unicode_shows_warning (line 130) | def test_command_display_with_hidden_unicode_shows_warning(self) -> None:
class TestToggleExpand (line 141) | class TestToggleExpand:
method test_toggle_changes_expanded_state (line 144) | def test_toggle_changes_expanded_state(self) -> None:
method test_toggle_updates_widget_with_correct_content (line 157) | def test_toggle_updates_widget_with_correct_content(self) -> None:
method test_toggle_does_nothing_for_non_expandable (line 178) | def test_toggle_does_nothing_for_non_expandable(self) -> None:
method test_toggle_does_nothing_without_widget (line 187) | def test_toggle_does_nothing_without_widget(self) -> None:
class TestToolSetConsistency (line 199) | class TestToolSetConsistency:
method test_bash_tool_is_expandable (line 202) | def test_bash_tool_is_expandable(self) -> None:
method test_bash_short_command_not_expandable (line 213) | def test_bash_short_command_not_expandable(self) -> None:
method test_execute_tool_is_minimal (line 218) | def test_execute_tool_is_minimal(self) -> None:
class TestSecurityWarnings (line 228) | class TestSecurityWarnings:
method test_collects_hidden_unicode_warning (line 231) | def test_collects_hidden_unicode_warning(self) -> None:
method test_collects_url_warning_for_suspicious_domain (line 237) | def test_collects_url_warning_for_suspicious_domain(self) -> None:
class TestGetCommandDisplayGuard (line 247) | class TestGetCommandDisplayGuard:
method test_raises_on_empty_action_requests (line 250) | def test_raises_on_empty_action_requests(self) -> None:
class TestOptionOrdering (line 259) | class TestOptionOrdering:
method test_decision_map_index_maps_to_correct_type (line 270) | def test_decision_map_index_maps_to_correct_type(
method test_action_select_sets_correct_index (line 292) | def test_action_select_sets_correct_index(
method test_key_binding_resolves_correct_decision (line 312) | async def test_key_binding_resolves_correct_decision(
FILE: libs/cli/tests/unit_tests/test_args.py
class TestInitialPromptArg (line 16) | class TestInitialPromptArg:
method test_short_flag (line 19) | def test_short_flag(self) -> None:
method test_long_flag (line 25) | def test_long_flag(self) -> None:
method test_no_flag (line 31) | def test_no_flag(self) -> None:
method test_with_other_args (line 37) | def test_with_other_args(self) -> None:
method test_empty_string (line 46) | def test_empty_string(self) -> None:
class TestResumeArg (line 53) | class TestResumeArg:
method test_short_flag_no_value (line 56) | def test_short_flag_no_value(self) -> None:
method test_short_flag_with_value (line 62) | def test_short_flag_with_value(self) -> None:
method test_long_flag_no_value (line 68) | def test_long_flag_no_value(self) -> None:
method test_long_flag_with_value (line 74) | def test_long_flag_with_value(self) -> None:
method test_no_flag (line 80) | def test_no_flag(self) -> None:
method test_with_other_args (line 86) | def test_with_other_args(self) -> None:
method test_resume_with_message (line 95) | def test_resume_with_message(self) -> None:
class TestTopLevelHelp (line 105) | class TestTopLevelHelp:
method test_top_level_help_exits_cleanly (line 108) | def test_top_level_help_exits_cleanly(self) -> None:
method test_help_subcommand_parses (line 127) | def test_help_subcommand_parses(self) -> None:
class TestSubcommandHelpFlags (line 137) | class TestSubcommandHelpFlags:
method _run_help (line 140) | def _run_help(
method test_list_help (line 165) | def test_list_help(self) -> None:
method test_reset_help (line 173) | def test_reset_help(self) -> None:
method test_threads_list_help (line 181) | def test_threads_list_help(self) -> None:
method test_threads_delete_help (line 189) | def test_threads_delete_help(self) -> None:
class TestShortFlags (line 198) | class TestShortFlags:
method test_short_agent_flag (line 201) | def test_short_agent_flag(self) -> None:
method test_short_model_flag (line 207) | def test_short_model_flag(self) -> None:
method test_agent_default_value (line 213) | def test_agent_default_value(self) -> None:
method test_short_version_flag (line 219) | def test_short_version_flag(self) -> None:
method test_short_auto_approve_flag (line 228) | def test_short_auto_approve_flag(self) -> None:
method test_short_shell_allow_list_flag (line 234) | def test_short_shell_allow_list_flag(self) -> None:
class TestQuietArg (line 241) | class TestQuietArg:
method test_short_flag (line 244) | def test_short_flag(self) -> None:
method test_long_flag (line 250) | def test_long_flag(self) -> None:
method test_no_flag_defaults_false (line 256) | def test_no_flag_defaults_false(self) -> None:
method test_combined_with_non_interactive (line 262) | def test_combined_with_non_interactive(self) -> None:
method test_quiet_without_non_interactive_parses (line 269) | def test_quiet_without_non_interactive_parses(self) -> None:
class TestNoMcpArg (line 281) | class TestNoMcpArg:
method test_no_mcp_flag_parsed (line 284) | def test_no_mcp_flag_parsed(self) -> None:
method test_no_mcp_default_false (line 290) | def test_no_mcp_default_false(self) -> None:
method test_no_mcp_and_mcp_config_mutual_exclusion (line 296) | def test_no_mcp_and_mcp_config_mutual_exclusion(self) -> None:
function test_default_agent_name_matches_canonical (line 314) | def test_default_agent_name_matches_canonical() -> None:
class TestHelpScreenDrift (line 319) | class TestHelpScreenDrift:
method test_all_parser_flags_appear_in_help (line 327) | def test_all_parser_flags_appear_in_help(self) -> None:
method test_threads_list_flags_appear_in_help (line 359) | def test_threads_list_flags_appear_in_help(self) -> None:
class TestJsonArg (line 394) | class TestJsonArg:
method test_default_text (line 397) | def test_default_text(self) -> None:
method test_json_shortcut (line 403) | def test_json_shortcut(self) -> None:
method test_json_before_subcommand (line 409) | def test_json_before_subcommand(self) -> None:
method test_json_after_subcommand (line 416) | def test_json_after_subcommand(self) -> None:
method test_output_format_flag_removed (line 423) | def test_output_format_flag_removed(self) -> None:
method test_json_after_nested_subcommand (line 432) | def test_json_after_nested_subcommand(self) -> None:
FILE: libs/cli/tests/unit_tests/test_ask_user.py
class _AskUserTestApp (line 18) | class _AskUserTestApp(App[None]):
method __init__ (line 19) | def __init__(self, questions: list[Question]) -> None:
method compose (line 23) | def compose(self) -> ComposeResult:
class TestAskUserToolDisplay (line 27) | class TestAskUserToolDisplay:
method test_format_single_question (line 30) | def test_format_single_question(self) -> None:
method test_format_multiple_questions (line 42) | def test_format_multiple_questions(self) -> None:
method test_format_empty_questions (line 59) | def test_format_empty_questions(self) -> None:
method test_format_no_questions_key (line 64) | def test_format_no_questions_key(self) -> None:
class TestAskUserMenu (line 69) | class TestAskUserMenu:
method test_find_menu_logs_when_hierarchy_is_missing (line 70) | def test_find_menu_logs_when_hierarchy_is_missing(
method test_text_input_receives_focus_on_mount (line 80) | async def test_text_input_receives_focus_on_mount(self) -> None:
method test_multiple_choice_question_widget_receives_focus_on_mount (line 90) | async def test_multiple_choice_question_widget_receives_focus_on_mount(
method test_text_question_submits_typed_answer (line 110) | async def test_text_question_submits_typed_answer(self) -> None:
method test_escape_cancels_and_resolves_future (line 131) | async def test_escape_cancels_and_resolves_future(self) -> None:
method test_multiple_choice_submits_without_text_input (line 150) | async def test_multiple_choice_submits_without_text_input(self) -> None:
method test_multiple_choice_other_accepts_custom_text (line 175) | async def test_multiple_choice_other_accepts_custom_text(self) -> None:
method test_enter_advances_sequentially_through_mc_questions (line 209) | async def test_enter_advances_sequentially_through_mc_questions(self) ...
method test_active_question_has_visual_indicator (line 269) | async def test_active_question_has_visual_indicator(self) -> None:
method test_tab_advances_to_next_question (line 286) | async def test_tab_advances_to_next_question(self) -> None:
method test_tab_clamps_at_last_question (line 310) | async def test_tab_clamps_at_last_question(self) -> None:
method test_tab_noop_for_single_question (line 333) | async def test_tab_noop_for_single_question(self) -> None:
method test_previous_question_navigates_backward (line 346) | async def test_previous_question_navigates_backward(self) -> None:
method test_previous_question_clamps_at_first (line 372) | async def test_previous_question_clamps_at_first(self) -> None:
method test_help_text_shows_tab_hint_for_multiple (line 390) | async def test_help_text_shows_tab_hint_for_multiple(self) -> None:
method test_help_text_omits_tab_hint_for_single (line 405) | async def test_help_text_omits_tab_hint_for_single(self) -> None:
method test_required_label_shown_for_required_question (line 415) | async def test_required_label_shown_for_required_question(self) -> None:
method test_required_label_hidden_for_optional_question (line 426) | async def test_required_label_hidden_for_optional_question(self) -> None:
method test_required_is_true_by_default (line 439) | async def test_required_is_true_by_default(self) -> None:
method test_optional_question_submits_with_empty_answer (line 451) | async def test_optional_question_submits_with_empty_answer(self) -> None:
method test_required_question_blocks_empty_submit (line 472) | async def test_required_question_blocks_empty_submit(self) -> None:
method test_up_from_other_input_selects_last_choice_directly (line 490) | async def test_up_from_other_input_selects_last_choice_directly(self) ...
method test_return_to_mc_other_refocuses_input (line 522) | async def test_return_to_mc_other_refocuses_input(self) -> None:
method test_cancel_after_submit_does_not_override_answer (line 558) | async def test_cancel_after_submit_does_not_override_answer(self) -> N...
method test_submit_after_cancel_does_not_override_cancel (line 582) | async def test_submit_after_cancel_does_not_override_cancel(self) -> N...
FILE: libs/cli/tests/unit_tests/test_ask_user_middleware.py
function _extract_tool_message_content (line 21) | def _extract_tool_message_content(command: Command[object]) -> str:
class TestValidateQuestions (line 32) | class TestValidateQuestions:
method test_rejects_empty_questions (line 35) | def test_rejects_empty_questions(self) -> None:
method test_rejects_empty_question_text (line 39) | def test_rejects_empty_question_text(self) -> None:
method test_rejects_multiple_choice_without_choices (line 43) | def test_rejects_multiple_choice_without_choices(self) -> None:
method test_rejects_text_question_with_choices (line 49) | def test_rejects_text_question_with_choices(self) -> None:
method test_accepts_valid_question_set (line 61) | def test_accepts_valid_question_set(self) -> None:
class TestParseAnswers (line 74) | class TestParseAnswers:
method test_parses_answered_payload (line 77) | def test_parses_answered_payload(self) -> None:
method test_cancelled_status_uses_cancelled_placeholder (line 86) | def test_cancelled_status_uses_cancelled_placeholder(self) -> None:
method test_error_status_uses_error_placeholder (line 94) | def test_error_status_uses_error_placeholder(self) -> None:
method test_malformed_payload_is_explicit_error (line 105) | def test_malformed_payload_is_explicit_error(self) -> None:
method test_missing_answers_on_answered_status_is_explicit_error (line 116) | def test_missing_answers_on_answered_status_is_explicit_error(self) ->...
method test_non_list_answers_payload_is_explicit_error (line 127) | def test_non_list_answers_payload_is_explicit_error(self) -> None:
method test_unknown_status_is_explicit_error (line 138) | def test_unknown_status_is_explicit_error(self) -> None:
method test_answer_count_mismatch_falls_back_to_no_answer (line 149) | def test_answer_count_mismatch_falls_back_to_no_answer(self) -> None:
class TestWrapModelCall (line 163) | class TestWrapModelCall:
method test_wrap_model_call_appends_system_prompt (line 166) | def test_wrap_model_call_appends_system_prompt(self) -> None:
method test_wrap_model_call_creates_system_prompt_when_missing (line 186) | def test_wrap_model_call_creates_system_prompt_when_missing(self) -> N...
method test_awrap_model_call_appends_system_prompt (line 203) | async def test_awrap_model_call_appends_system_prompt(self) -> None:
FILE: libs/cli/tests/unit_tests/test_autocomplete.py
class TestFuzzyScore (line 22) | class TestFuzzyScore:
method test_exact_filename_match_at_start (line 25) | def test_exact_filename_match_at_start(self):
method test_exact_filename_match_anywhere (line 30) | def test_exact_filename_match_anywhere(self):
method test_word_boundary_match (line 35) | def test_word_boundary_match(self):
method test_path_match_lower_than_filename (line 41) | def test_path_match_lower_than_filename(self):
method test_no_match_returns_low_score (line 47) | def test_no_match_returns_low_score(self):
method test_case_insensitive (line 52) | def test_case_insensitive(self):
method test_shorter_paths_preferred (line 59) | def test_shorter_paths_preferred(self):
method test_backslash_normalization (line 65) | def test_backslash_normalization(self):
method test_mixed_separator_normalization (line 72) | def test_mixed_separator_normalization(self):
class TestFuzzySearch (line 78) | class TestFuzzySearch:
method sample_files (line 82) | def sample_files(self):
method test_empty_query_returns_root_files_first (line 97) | def test_empty_query_returns_root_files_first(self, sample_files):
method test_exact_match_ranked_first (line 104) | def test_exact_match_ranked_first(self, sample_files):
method test_filters_dotfiles_by_default (line 109) | def test_filters_dotfiles_by_default(self, sample_files):
method test_includes_dotfiles_when_query_starts_with_dot (line 114) | def test_includes_dotfiles_when_query_starts_with_dot(self, sample_fil...
method test_respects_limit (line 119) | def test_respects_limit(self, sample_files):
method test_filters_low_score_matches (line 124) | def test_filters_low_score_matches(self, sample_files):
method test_utils_matches_multiple_files (line 129) | def test_utils_matches_multiple_files(self, sample_files):
class TestHelperFunctions (line 136) | class TestHelperFunctions:
method test_is_dotpath_detects_dotfiles (line 139) | def test_is_dotpath_detects_dotfiles(self):
method test_is_dotpath_allows_normal_files (line 145) | def test_is_dotpath_allows_normal_files(self):
method test_path_depth_counts_slashes (line 151) | def test_path_depth_counts_slashes(self):
class TestSlashCommandController (line 159) | class TestSlashCommandController:
method mock_view (line 163) | def mock_view(self):
method controller (line 168) | def controller(self, mock_view):
method test_can_handle_slash_prefix (line 172) | def test_can_handle_slash_prefix(self, controller):
method test_cannot_handle_non_slash (line 178) | def test_cannot_handle_non_slash(self, controller):
method test_filters_commands_by_prefix (line 184) | def test_filters_commands_by_prefix(self, controller, mock_view):
method test_filters_version_command_by_prefix (line 193) | def test_filters_version_command_by_prefix(self, controller, mock_view):
method test_shows_all_commands_on_slash_only (line 201) | def test_shows_all_commands_on_slash_only(self, controller, mock_view):
method test_clears_on_no_match (line 209) | def test_clears_on_no_match(self, controller, mock_view):
method test_reset_clears_state (line 219) | def test_reset_clears_state(self, controller, mock_view):
method test_suggestions_return_after_reset (line 226) | def test_suggestions_return_after_reset(self, controller, mock_view):
method test_hidden_keyword_match_continue (line 240) | def test_hidden_keyword_match_continue(self, controller, mock_view):
method test_substring_description_match_exit (line 248) | def test_substring_description_match_exit(self, controller, mock_view):
method test_substring_description_match_new (line 256) | def test_substring_description_match_new(self, controller, mock_view):
method test_substring_name_match (line 264) | def test_substring_name_match(self, controller, mock_view):
method test_true_fuzzy_match_via_misspelling (line 272) | def test_true_fuzzy_match_via_misspelling(self, controller, mock_view):
method test_prefix_match_ranks_first (line 280) | def test_prefix_match_ranks_first(self, controller, mock_view):
method test_no_match_clears (line 289) | def test_no_match_clears(self, controller, mock_view):
method test_double_reset_is_safe (line 298) | def test_double_reset_is_safe(self, controller):
class TestScoreCommand (line 306) | class TestScoreCommand:
method score (line 310) | def score(search: str, cmd: str, desc: str, keywords: str = "") -> float:
method test_prefix_returns_200 (line 314) | def test_prefix_returns_200(self):
method test_substring_name_returns_150 (line 317) | def test_substring_name_returns_150(self):
method test_substring_desc_word_boundary_returns_110 (line 320) | def test_substring_desc_word_boundary_returns_110(self):
method test_substring_desc_mid_word_returns_90 (line 323) | def test_substring_desc_mid_word_returns_90(self):
method test_no_match_returns_zero (line 327) | def test_no_match_returns_zero(self):
method test_fuzzy_above_threshold (line 330) | def test_fuzzy_above_threshold(self):
method test_hidden_keyword_prefix_match (line 334) | def test_hidden_keyword_prefix_match(self):
method test_hidden_keyword_substring_match (line 339) | def test_hidden_keyword_substring_match(self):
method test_hidden_keyword_ignored_when_empty (line 344) | def test_hidden_keyword_ignored_when_empty(self):
method test_hidden_keyword_requires_min_length (line 347) | def test_hidden_keyword_requires_min_length(self):
method test_tie
Copy disabled (too large)
Download .json
Condensed preview — 490 files, each showing path, character count, and a content snippet. Download the .json file for the full structured content (14,496K chars).
[
{
"path": ".github/CODEOWNERS",
"chars": 393,
"preview": "# This file defines code ownership for the Deep Agents repository.\n# Each line is a file pattern followed by one or more"
},
{
"path": ".github/ISSUE_TEMPLATE/bug-report.yml",
"chars": 4716,
"preview": "name: \"\\U0001F41B Bug Report\"\ndescription: Report a bug in Deep Agents. To report a security issue, please instead use t"
},
{
"path": ".github/ISSUE_TEMPLATE/config.yml",
"chars": 733,
"preview": "blank_issues_enabled: false\nversion: 2.1\ncontact_links:\n - name: 💬 Deep Agents Forum\n url: https://forum.langchain.c"
},
{
"path": ".github/ISSUE_TEMPLATE/feature-request.yml",
"chars": 2938,
"preview": "name: \"✨ Feature Request\"\ndescription: Request a new feature or enhancement for Deep Agents. For questions, please use t"
},
{
"path": ".github/ISSUE_TEMPLATE/privileged.yml",
"chars": 1498,
"preview": "name: \"\\U0001F512 Privileged\"\ndescription: You are a Deep Agents maintainer. If not, check the other options.\nbody:\n - "
},
{
"path": ".github/PULL_REQUEST_TEMPLATE.md",
"chars": 2075,
"preview": "Fixes #\n\n<!-- Replace everything above this line with a 1-2 sentence description of your change. Keep the \"Fixes #xx\" ke"
},
{
"path": ".github/RELEASING.md",
"chars": 12926,
"preview": "# CLI Release Process\n\nThis document describes the release process for the CLI package (`libs/cli`) in the Deep Agents m"
},
{
"path": ".github/actions/uv_setup/action.yml",
"chars": 1109,
"preview": "# Helper to set up Python and uv with caching\n\nname: uv-install\ndescription: Set up Python and uv with caching\n\ninputs:\n"
},
{
"path": ".github/dependabot.yml",
"chars": 598,
"preview": "version: 2\n\nupdates:\n - package-ecosystem: \"github-actions\"\n directory: \"/\"\n schedule:\n interval: \"weekly\"\n "
},
{
"path": ".github/scripts/aggregate_evals.py",
"chars": 4910,
"preview": "from __future__ import annotations\n\nimport glob\nimport json\nimport os\nimport sys\nfrom pathlib import Path\n\nfrom tabulate"
},
{
"path": ".github/scripts/check_extras_sync.py",
"chars": 2769,
"preview": "\"\"\"Check that optional extras stay in sync with required dependencies (openai).\n\nWhen a package appears in both [project"
},
{
"path": ".github/scripts/check_version_equality.py",
"chars": 2699,
"preview": "\"\"\"Check that pyproject.toml and _version.py versions stay in sync.\n\nPrevents releases with mismatched version numbers a"
},
{
"path": ".github/scripts/models.py",
"chars": 8630,
"preview": "\"\"\"Unified model registry for eval and harbor GitHub Actions workflows.\n\nSingle source of truth for all model definition"
},
{
"path": ".github/scripts/pr-labeler-config.json",
"chars": 2135,
"preview": "{\n \"org\": \"langchain-ai\",\n \"trustedThreshold\": 5,\n \"labelColor\": \"b76e79\",\n \"sizeThresholds\": [\n {\n \"label\":"
},
{
"path": ".github/scripts/pr-labeler.js",
"chars": 8962,
"preview": "// Shared helpers for pr_labeler.yml and tag-external-issues.yml.\n//\n// Usage from actions/github-script (requires actio"
},
{
"path": ".github/workflows/_benchmark.yml",
"chars": 1636,
"preview": "# Reusable workflow: CodSpeed wall-time benchmarks\n#\n# Runs pytest-benchmark tests under CodSpeed instrumentation so tha"
},
{
"path": ".github/workflows/_lint.yml",
"chars": 1286,
"preview": "# Reusable workflow for running linting\n\nname: \"🧹 Linting\"\n\non:\n workflow_call:\n inputs:\n working-directory:\n "
},
{
"path": ".github/workflows/_test.yml",
"chars": 1817,
"preview": "# Reusable workflow for running unit tests\n\nname: \"🧪 Unit Testing\"\n\non:\n workflow_call:\n inputs:\n working-direc"
},
{
"path": ".github/workflows/auto-label-by-package.yml",
"chars": 3083,
"preview": "name: Auto Label Issues by Package\n\non:\n issues:\n types: [opened, edited]\n\njobs:\n label-by-package:\n permissions"
},
{
"path": ".github/workflows/check_extras_sync.yml",
"chars": 1017,
"preview": "# Ensures optional extras stay in sync with required dependencies.\n#\n# When a package appears in both [project.dependenc"
},
{
"path": ".github/workflows/check_lockfiles.yml",
"chars": 730,
"preview": "# Check that all uv.lock files are up-to-date\n#\n# Prevents PRs from being merged when lockfiles are out of sync with pyp"
},
{
"path": ".github/workflows/check_sdk_pin.yml",
"chars": 6424,
"preview": "# Advisory check: posts a comment on CLI release PRs when the deepagents SDK\n# pin drifts from the actual SDK version. D"
},
{
"path": ".github/workflows/check_versions.yml",
"chars": 902,
"preview": "# Ensures version numbers in pyproject.toml and _version.py stay in sync.\n#\n# (Prevents releases with mismatched version"
},
{
"path": ".github/workflows/ci.yml",
"chars": 9898,
"preview": "# Main CI workflow for Deep Agents monorepo\n#\n# Runs on every pull request:\n# - Linting for changed packages\n# - Unit Te"
},
{
"path": ".github/workflows/deepagents-example.yml",
"chars": 10037,
"preview": "name: Deep Agents Example\n\non:\n issue_comment:\n types: [created]\n pull_request_review_comment:\n types: [created]"
},
{
"path": ".github/workflows/evals.yml",
"chars": 12553,
"preview": "# Daily evaluation workflow for Deep Agents\n#\n# Runs tests/evals on a cron schedule (once per day).\n# Single job; model/"
},
{
"path": ".github/workflows/harbor.yml",
"chars": 6730,
"preview": "name: \"⚓ Harbor\"\n\non:\n workflow_dispatch:\n inputs:\n models:\n description: \"Model set to run. Set definit"
},
{
"path": ".github/workflows/pr_labeler.yml",
"chars": 10979,
"preview": "# Unified PR labeler — applies size, file-based, title-based, and\n# contributor classification labels in a single sequen"
},
{
"path": ".github/workflows/pr_labeler_backfill.yml",
"chars": 4809,
"preview": "# Backfill PR labels on all open PRs.\n#\n# Manual-only workflow that applies the same labels as pr_labeler.yml\n# (size, f"
},
{
"path": ".github/workflows/pr_lint.yml",
"chars": 3301,
"preview": "# PR title linting.\n#\n# FORMAT (Conventional Commits 1.0.0):\n#\n# <type>[optional scope]: <description>\n# [optional b"
},
{
"path": ".github/workflows/release-please.yml",
"chars": 3422,
"preview": "# Creates release PRs based on conventional commits.\n#\n# When commits land on main, release-please analyzes them and eit"
},
{
"path": ".github/workflows/release.yml",
"chars": 27742,
"preview": "# Builds and publishes deepagents packages to PyPI.\n#\n# Triggers:\n# - Automatically via workflow_call from release-pleas"
},
{
"path": ".github/workflows/require_issue_link.yml",
"chars": 14768,
"preview": "# Require external PRs to link to an approved issue or discussion using\n# GitHub auto-close keywords (Fixes #NNN, Closes"
},
{
"path": ".github/workflows/sync_priority_labels.yml",
"chars": 18146,
"preview": "# Sync priority labels (p0–p3) from linked issues to PRs.\n#\n# Triggers:\n# 1. PR opened/edited — parse issue links, copy "
},
{
"path": ".github/workflows/tag-external-issues.yml",
"chars": 7567,
"preview": "# Automatically tag issues as \"external\" or \"internal\" based on whether\n# the author is a member of the langchain-ai Git"
},
{
"path": ".gitignore",
"chars": 4845,
"preview": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[codz]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packag"
},
{
"path": ".markdownlint.json",
"chars": 195,
"preview": "{\n \"MD013\": false,\n \"MD024\": {\n \"siblings_only\": true\n },\n \"MD025\": false,\n \"MD033\": false,\n \"MD034\": false,\n "
},
{
"path": ".mcp.json",
"chars": 233,
"preview": "{\n \"mcpServers\": {\n \"docs-langchain\": {\n \"type\": \"http\",\n \"url\": \"https://docs.langchain.com/mcp\"\n },\n "
},
{
"path": ".pre-commit-config.yaml",
"chars": 2468,
"preview": "repos:\n - repo: https://github.com/pre-commit/pre-commit-hooks\n rev: v4.3.0\n hooks:\n - id: no-commit-to-bran"
},
{
"path": ".release-please-manifest.json",
"chars": 27,
"preview": "{\n \"libs/cli\": \"0.0.34\"\n}\n"
},
{
"path": ".vscode/extensions.json",
"chars": 274,
"preview": "{\n \"recommendations\": [\n \"ms-python.python\",\n \"charliermarsh.ruff\",\n \"astral-sh.ty\",\n \"davidanson.vscode-ma"
},
{
"path": ".vscode/settings.json",
"chars": 1943,
"preview": "{\n \"prettier.enable\": false,\n \"python.analysis.include\": [\n \"libs/**\"\n ],\n \"python.analysis.exclude\": [\n \"**/n"
},
{
"path": "AGENTS.md",
"chars": 16933,
"preview": "# Global development guidelines for the Deep Agents monorepo\n\nThis document provides context to understand the Deep Agen"
},
{
"path": "LICENSE",
"chars": 1067,
"preview": "MIT License\n\nCopyright (c) LangChain, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy"
},
{
"path": "Makefile",
"chars": 1322,
"preview": "PACKAGE_DIRS := $(sort $(patsubst %/,%,$(dir $(wildcard libs/*/Makefile libs/partners/*/Makefile))))\n\n# Map package dirs"
},
{
"path": "README.md",
"chars": 5784,
"preview": "<div align=\"center\">\n <a href=\"https://docs.langchain.com/oss/python/deepagents/overview#deep-agents-overview\">\n <pi"
},
{
"path": "action.yml",
"chars": 9223,
"preview": "name: \"Deep Agents\"\ndescription: \"Run Deep Agents CLI coding assistant in GitHub workflows\"\nauthor: \"LangChain AI\"\nbrand"
},
{
"path": "examples/README.md",
"chars": 1998,
"preview": "<p align=\"center\">\n <picture>\n <source media=\"(prefers-color-scheme: light)\" srcset=\"../.github/images/logo-light.sv"
},
{
"path": "examples/content-builder-agent/.gitignore",
"chars": 139,
"preview": "# Output folders\nblogs/\nlinkedin/\ntweets/\nresearch/\n\n# Python\n__pycache__/\n*.pyc\n.venv/\n\n# Lock file (regenerated by uv)"
},
{
"path": "examples/content-builder-agent/AGENTS.md",
"chars": 1729,
"preview": "# Content Writer Agent\n\nYou are a content writer for a technology company. Your job is to create engaging, informative c"
},
{
"path": "examples/content-builder-agent/README.md",
"chars": 5279,
"preview": "# Content Builder Agent\n\n<img width=\"1255\" height=\"756\" alt=\"content-cover-image\" src=\"https://github.com/user-attachmen"
},
{
"path": "examples/content-builder-agent/content_writer.py",
"chars": 9904,
"preview": "#!/usr/bin/env python3\nimport warnings\nwarnings.filterwarnings(\"ignore\", message=\"Core Pydantic V1 functionality\")\n\n\"\"\"\n"
},
{
"path": "examples/content-builder-agent/pyproject.toml",
"chars": 426,
"preview": "[project]\nname = \"content-builder-agent\"\nversion = \"0.1.0\"\ndescription = \"A content writer agent configured entirely thr"
},
{
"path": "examples/content-builder-agent/skills/blog-post/SKILL.md",
"chars": 4714,
"preview": "---\nname: blog-post\ndescription: Writes and structures long-form blog posts, creates tutorial outlines, and optimizes co"
},
{
"path": "examples/content-builder-agent/skills/social-media/SKILL.md",
"chars": 4880,
"preview": "---\nname: social-media\ndescription: Drafts engaging social media posts, writes hooks, suggests hashtags, creates thread "
},
{
"path": "examples/content-builder-agent/subagents.yaml",
"chars": 1171,
"preview": "# Subagent definitions\n# These are loaded by content_writer.py and wired up with tools\n\nresearcher:\n description: >\n "
},
{
"path": "examples/deep_research/README.md",
"chars": 5211,
"preview": "# 🚀 Deep Research\n\n## 🚀 Quickstart\n\n**Prerequisites**: Install [uv](https://docs.astral.sh/uv/) package manager:\n\n```bas"
},
{
"path": "examples/deep_research/agent.py",
"chars": 1799,
"preview": "\"\"\"Research Agent - Standalone script for LangGraph deployment.\n\nThis module creates a deep research agent with custom t"
},
{
"path": "examples/deep_research/langgraph.json",
"chars": 98,
"preview": "{\n \"dependencies\": [\".\"],\n \"graphs\": {\n \"research\": \"./agent.py:agent\"\n },\n \"env\": \".env\"\n}"
},
{
"path": "examples/deep_research/pyproject.toml",
"chars": 1425,
"preview": "[project]\nname = \"deep-research-example\"\nversion = \"0.1.0\"\ndescription = \"Deep research agent example using deepagents p"
},
{
"path": "examples/deep_research/research_agent/__init__.py",
"chars": 539,
"preview": "\"\"\"Deep Research Agent Example.\n\nThis module demonstrates building a research agent using the deepagents package\nwith cu"
},
{
"path": "examples/deep_research/research_agent/prompts.py",
"chars": 7783,
"preview": "\"\"\"Prompt templates and tool descriptions for the research deepagent.\"\"\"\n\nRESEARCH_WORKFLOW_INSTRUCTIONS = \"\"\"# Research"
},
{
"path": "examples/deep_research/research_agent/tools.py",
"chars": 3655,
"preview": "\"\"\"Research Tools.\n\nThis module provides search and content processing utilities for the research agent,\nusing Tavily fo"
},
{
"path": "examples/deep_research/research_agent.ipynb",
"chars": 377577,
"preview": "{\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"execution_count\": 1,\n \"id\": \"fecc3e39\",\n \"metadata\": {},\n \"outputs\":"
},
{
"path": "examples/deep_research/utils.py",
"chars": 3333,
"preview": "\"\"\"Utility functions for displaying messages and prompts in Jupyter notebooks.\"\"\"\n\nimport json\n\nfrom rich.console import"
},
{
"path": "examples/downloading_agents/README.md",
"chars": 1329,
"preview": "# Downloading Agents\n\nAgents are just folders. This means you can share, download, and run them instantly.\n\n## Why This "
},
{
"path": "examples/nvidia_deep_agent/.gitignore",
"chars": 566,
"preview": "# Ignore all cloned repositories to avoid nested git issues\n# These are managed independently and should not be committe"
},
{
"path": "examples/nvidia_deep_agent/README.md",
"chars": 7785,
"preview": "# Nemotron Deep Agent + GPU Skills\n\nGeneral-purpose deep agent showcasing **multi-model architecture** with **GPU code e"
},
{
"path": "examples/nvidia_deep_agent/langgraph.json",
"chars": 104,
"preview": "{\n \"dependencies\": [\".\"],\n \"graphs\": {\n \"deepagent\": \"./src/agent.py:agent\"\n },\n \"env\": \".env\"\n}\n"
},
{
"path": "examples/nvidia_deep_agent/pyproject.toml",
"chars": 761,
"preview": "[project]\nname = \"nemotron-deep-agent\"\nversion = \"0.1.0\"\ndescription = \"General-purpose deep agent: frontier orchestrato"
},
{
"path": "examples/nvidia_deep_agent/skills/cudf-analytics/SKILL.md",
"chars": 3555,
"preview": "---\nname: cudf-analytics\ndescription: Use for GPU-accelerated data analysis on datasets, CSVs, or tabular data using NVI"
},
{
"path": "examples/nvidia_deep_agent/skills/cuml-machine-learning/SKILL.md",
"chars": 7005,
"preview": "---\nname: cuml-machine-learning\ndescription: Use for GPU-accelerated machine learning on tabular data using NVIDIA cuML."
},
{
"path": "examples/nvidia_deep_agent/skills/data-visualization/SKILL.md",
"chars": 11603,
"preview": "---\nname: data-visualization\ndescription: Use for creating publication-quality charts and multi-panel analysis summaries"
},
{
"path": "examples/nvidia_deep_agent/skills/gpu-document-processing/SKILL.md",
"chars": 3745,
"preview": "---\nname: gpu-document-processing\ndescription: Use when processing large PDFs, document collections, or bulk text extrac"
},
{
"path": "examples/nvidia_deep_agent/src/AGENTS.md",
"chars": 10650,
"preview": "## Available Subagents\n\n1. **researcher-agent**: Gathers and synthesizes information via web search. Give one focused re"
},
{
"path": "examples/nvidia_deep_agent/src/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "examples/nvidia_deep_agent/src/agent.py",
"chars": 3265,
"preview": "\"\"\"NVIDIA Deep Agent Skills.\n\nGeneral-purpose deep agent showcasing multi-model architecture:\n- Frontier model as orches"
},
{
"path": "examples/nvidia_deep_agent/src/backend.py",
"chars": 3494,
"preview": "\"\"\"Backend configuration: Modal sandbox with skills/memory uploaded on creation.\"\"\"\n\nfrom pathlib import Path\n\nimport mo"
},
{
"path": "examples/nvidia_deep_agent/src/prompts.py",
"chars": 7561,
"preview": "\"\"\"Prompt templates for the NVIDIA Deep Agent Skills example.\n\nAdapted from NVIDIA's AIQ Blueprint (orchestrator.j2, res"
},
{
"path": "examples/nvidia_deep_agent/src/tools.py",
"chars": 2278,
"preview": "\"\"\"Research Tools.\n\nThis module provides search and content processing utilities for the research agent,\nusing Tavily fo"
},
{
"path": "examples/ralph_mode/README.md",
"chars": 3102,
"preview": "# Ralph Mode for Deep Agents\n\n\n\n## What is Ralph?\n\nRalph is an autonomous l"
},
{
"path": "examples/ralph_mode/ralph_mode.py",
"chars": 8814,
"preview": "\"\"\"Ralph Mode - Autonomous looping for Deep Agents.\n\nRalph is an autonomous looping pattern created by Geoff Huntley\n(ht"
},
{
"path": "examples/text-to-sql-agent/.gitignore",
"chars": 309,
"preview": "# Environment variables\n.env\n\n# Database\nchinook.db\n\n# Virtual environment\n.venv/\nvenv/\nenv/\n\n# Python\n__pycache__/\n*.py"
},
{
"path": "examples/text-to-sql-agent/AGENTS.md",
"chars": 1738,
"preview": "# Text-to-SQL Agent Instructions\n\nYou are a Deep Agent designed to interact with a SQL database.\n\n## Your Role\n\nGiven a "
},
{
"path": "examples/text-to-sql-agent/README.md",
"chars": 7330,
"preview": "# Text-to-SQL Deep Agent\n\nA natural language to SQL query agent powered by LangChain's **Deep Agents** framework. This "
},
{
"path": "examples/text-to-sql-agent/agent.py",
"chars": 3320,
"preview": "import argparse\nimport os\nimport sys\n\nfrom deepagents import create_deep_agent\nfrom deepagents.backends import Filesyste"
},
{
"path": "examples/text-to-sql-agent/pyproject.toml",
"chars": 752,
"preview": "[project]\nname = \"text2sql-deepagent\"\nversion = \"0.1.0\"\ndescription = \"A natural language to SQL query agent powered by "
},
{
"path": "examples/text-to-sql-agent/skills/query-writing/SKILL.md",
"chars": 2212,
"preview": "---\nname: query-writing\ndescription: Writes and executes SQL queries from simple SELECTs to complex multi-table JOINs, a"
},
{
"path": "examples/text-to-sql-agent/skills/schema-exploration/SKILL.md",
"chars": 3794,
"preview": "---\nname: schema-exploration\ndescription: Lists tables, describes columns and data types, identifies foreign key relatio"
},
{
"path": "libs/README.md",
"chars": 1283,
"preview": "# Deep Agents Monorepo\n\n> [!IMPORTANT]\n> Refer to the [LangChain contributing guide](https://docs.langchain.com/oss/pyth"
},
{
"path": "libs/acp/Makefile",
"chars": 1676,
"preview": ".PHONY: lint format type typecheck test help test_watch toad\n\n.DEFAULT_GOAL := help\n\n######################\n# TESTING AN"
},
{
"path": "libs/acp/README.md",
"chars": 3010,
"preview": "# Deep Agents ACP integration\n\nThis directory contains an [Agent Client Protocol (ACP)](https://agentclientprotocol.com/"
},
{
"path": "libs/acp/deepagents_acp/__init__.py",
"chars": 57,
"preview": "\"\"\"Agent Client Protocol integration for Deep Agents.\"\"\"\n"
},
{
"path": "libs/acp/deepagents_acp/__main__.py",
"chars": 267,
"preview": "\"\"\"Entry point for running the ACP server as a module.\"\"\"\n\nimport asyncio\n\nfrom deepagents_acp.server import _serve_test"
},
{
"path": "libs/acp/deepagents_acp/py.typed.py",
"chars": 0,
"preview": ""
},
{
"path": "libs/acp/deepagents_acp/server.py",
"chars": 36193,
"preview": "\"\"\"ACP server implementation for Deep Agents.\"\"\"\n\nfrom __future__ import annotations\n\nimport json\nfrom dataclasses impor"
},
{
"path": "libs/acp/deepagents_acp/utils.py",
"chars": 12135,
"preview": "\"\"\"Utility functions for converting ACP content blocks to LangChain formats.\"\"\"\n\nfrom __future__ import annotations\n\nimp"
},
{
"path": "libs/acp/examples/__init__.py",
"chars": 38,
"preview": "\"\"\"Initialize the examples module.\"\"\"\n"
},
{
"path": "libs/acp/examples/demo_agent.py",
"chars": 3853,
"preview": "\"\"\"Demo coding agent using ACP.\"\"\"\n\nimport asyncio\nimport os\n\nfrom acp import (\n run_agent as run_acp_agent,\n)\nfrom a"
},
{
"path": "libs/acp/examples/local_context.py",
"chars": 16504,
"preview": "\"\"\"Middleware for injecting local context into system prompt.\n\nDetects git state, project structure, package managers, r"
},
{
"path": "libs/acp/pyproject.toml",
"chars": 3993,
"preview": "[build-system]\nrequires = [\"hatchling\"]\nbuild-backend = \"hatchling.build\"\n\n[project]\nname = \"deepagents-acp\"\nversion = \""
},
{
"path": "libs/acp/run_demo_agent.sh",
"chars": 244,
"preview": "#!/bin/bash\n# Wrapper script to run deepagents-acp with the deps in the script directory\n# but with the current working "
},
{
"path": "libs/acp/tests/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "libs/acp/tests/chat_model.py",
"chars": 9190,
"preview": "\"\"\"Fake chat models for testing purposes.\"\"\"\n\nimport re\nfrom collections.abc import Callable, Iterator, Sequence\nfrom ty"
},
{
"path": "libs/acp/tests/test_agent.py",
"chars": 32903,
"preview": "from __future__ import annotations\n\nimport asyncio\nfrom typing import Any, Literal\n\nimport pytest\nfrom acp import text_b"
},
{
"path": "libs/acp/tests/test_command_allowlist.py",
"chars": 15279,
"preview": "\"\"\"Test command type allowlist functionality for execute tool.\"\"\"\n\nfrom deepagents import create_deep_agent\nfrom langcha"
},
{
"path": "libs/acp/tests/test_main.py",
"chars": 130,
"preview": "from __future__ import annotations\n\n\ndef test_import_main_module() -> None:\n from deepagents_acp import __main__ # n"
},
{
"path": "libs/acp/tests/test_utils.py",
"chars": 2092,
"preview": "from __future__ import annotations\n\nfrom acp.schema import (\n EmbeddedResourceContentBlock,\n ImageContentBlock,\n "
},
{
"path": "libs/cli/CHANGELOG.md",
"chars": 45928,
"preview": "# Changelog\n\n## [0.0.34](https://github.com/langchain-ai/deepagents/compare/deepagents-cli==0.0.33...deepagents-cli==0.0"
},
{
"path": "libs/cli/Makefile",
"chars": 2705,
"preview": ".PHONY: format lint type typecheck test tests integration_test integration_tests test_watch benchmark help run lint_pack"
},
{
"path": "libs/cli/README.md",
"chars": 3160,
"preview": "# 🧠🤖 Deep Agents CLI\n\n[](https://pypi.org/proje"
},
{
"path": "libs/cli/deepagents_cli/__init__.py",
"chars": 966,
"preview": "\"\"\"Deep Agents CLI - Interactive AI coding assistant.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CH"
},
{
"path": "libs/cli/deepagents_cli/__main__.py",
"chars": 143,
"preview": "\"\"\"Allow running the CLI as: python -m deepagents.cli.\"\"\"\n\nfrom deepagents_cli.main import cli_main\n\nif __name__ == \"__m"
},
{
"path": "libs/cli/deepagents_cli/_ask_user_types.py",
"chars": 2265,
"preview": "\"\"\"Lightweight types for the ask-user interrupt protocol.\n\nExtracted from `ask_user` so `textual_adapter` can import `As"
},
{
"path": "libs/cli/deepagents_cli/_cli_context.py",
"chars": 783,
"preview": "\"\"\"Lightweight runtime context type for CLI model overrides.\n\nExtracted from `configurable_model` so hot-path modules (`"
},
{
"path": "libs/cli/deepagents_cli/_debug.py",
"chars": 1548,
"preview": "\"\"\"Shared debug-logging configuration for verbose file-based tracing.\n\nWhen the `DEEPAGENTS_DEBUG` environment variable "
},
{
"path": "libs/cli/deepagents_cli/_server_config.py",
"chars": 11376,
"preview": "\"\"\"Typed configuration for the CLI-to-server subprocess communication channel.\n\nThe CLI spawns a `langgraph dev` subproc"
},
{
"path": "libs/cli/deepagents_cli/_server_constants.py",
"chars": 189,
"preview": "\"\"\"Shared constants for server communication between CLI and server graph.\"\"\"\n\nENV_PREFIX = \"DA_SERVER_\"\n\"\"\"Environment "
},
{
"path": "libs/cli/deepagents_cli/_session_stats.py",
"chars": 4039,
"preview": "\"\"\"Lightweight session statistics and token formatting utilities.\n\nThis module is intentionally kept free of heavy depen"
},
{
"path": "libs/cli/deepagents_cli/_testing_models.py",
"chars": 5619,
"preview": "\"\"\"Internal chat models used by local integration tests.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import TYPE"
},
{
"path": "libs/cli/deepagents_cli/_version.py",
"chars": 588,
"preview": "\"\"\"Version information and lightweight constants for `deepagents-cli`.\"\"\"\n\n__version__ = \"0.0.34\" # x-release-please-ve"
},
{
"path": "libs/cli/deepagents_cli/agent.py",
"chars": 34943,
"preview": "\"\"\"Agent management and creation for the CLI.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport os\nimport re"
},
{
"path": "libs/cli/deepagents_cli/app.py",
"chars": 167816,
"preview": "\"\"\"Textual UI application for deepagents-cli.\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport json\nimport "
},
{
"path": "libs/cli/deepagents_cli/app.tcss",
"chars": 4296,
"preview": "/* Deep Agents CLI Textual Stylesheet */\n\n/* Define layers for z-ordering */\nScreen {\n layout: vertical;\n layers: "
},
{
"path": "libs/cli/deepagents_cli/ask_user.py",
"chars": 11215,
"preview": "\"\"\"Ask user middleware for interactive question-answering during agent execution.\"\"\"\n\nfrom __future__ import annotations"
},
{
"path": "libs/cli/deepagents_cli/built_in_skills/__init__.py",
"chars": 189,
"preview": "\"\"\"Built-in skills that ship with the Deep Agents CLI.\n\nThese skills are always available at the lowest precedence level"
},
{
"path": "libs/cli/deepagents_cli/built_in_skills/skill-creator/SKILL.md",
"chars": 19098,
"preview": "---\nname: skill-creator\ndescription: \"Guide for creating effective skills that extend agent capabilities with specialize"
},
{
"path": "libs/cli/deepagents_cli/built_in_skills/skill-creator/scripts/init_skill.py",
"chars": 13060,
"preview": "#!/usr/bin/env python3\n\"\"\"Skill Initializer - Creates a new skill from template.\n\nUsage:\n init_skill.py <skill-name> --p"
},
{
"path": "libs/cli/deepagents_cli/built_in_skills/skill-creator/scripts/quick_validate.py",
"chars": 5026,
"preview": "#!/usr/bin/env python3\n\"\"\"Quick validation script for skills - minimal version.\n\nFor deepagents CLI, skills are located "
},
{
"path": "libs/cli/deepagents_cli/clipboard.py",
"chars": 3652,
"preview": "\"\"\"Clipboard utilities for deepagents-cli.\"\"\"\n\nfrom __future__ import annotations\n\nimport base64\nimport logging\nimport o"
},
{
"path": "libs/cli/deepagents_cli/command_registry.py",
"chars": 6396,
"preview": "\"\"\"Unified slash-command registry.\n\nEvery slash command is declared once as a `SlashCommand` entry in `COMMANDS`.\nBypass"
},
{
"path": "libs/cli/deepagents_cli/config.py",
"chars": 69129,
"preview": "\"\"\"Configuration, constants, and model creation for the CLI.\"\"\"\n\nfrom __future__ import annotations\n\nimport importlib\nim"
},
{
"path": "libs/cli/deepagents_cli/configurable_model.py",
"chars": 5701,
"preview": "\"\"\"CLI middleware for runtime model selection via LangGraph runtime context.\n\nAllows switching the model per invocation "
},
{
"path": "libs/cli/deepagents_cli/default_agent_prompt.md",
"chars": 336,
"preview": "# Project Notes\n\nThis file is for tracking project-specific context as you work.\nYou can update this file to remember de"
},
{
"path": "libs/cli/deepagents_cli/editor.py",
"chars": 4054,
"preview": "\"\"\"External editor support for composing prompts.\"\"\"\n\nfrom __future__ import annotations\n\nimport contextlib\nimport loggi"
},
{
"path": "libs/cli/deepagents_cli/file_ops.py",
"chars": 16746,
"preview": "\"\"\"Helpers for tracking file operations and computing diffs for CLI display.\"\"\"\n\nfrom __future__ import annotations\n\nimp"
},
{
"path": "libs/cli/deepagents_cli/hooks.py",
"chars": 6926,
"preview": "\"\"\"Lightweight hook dispatch for external tool integration.\n\nLoads hook configuration from `~/.deepagents/hooks.json` an"
},
{
"path": "libs/cli/deepagents_cli/input.py",
"chars": 24405,
"preview": "\"\"\"Input handling utilities including image/video tracking and file mention parsing.\"\"\"\n\nimport logging\nimport re\nimport"
},
{
"path": "libs/cli/deepagents_cli/integrations/__init__.py",
"chars": 68,
"preview": "\"\"\"Integrations for external systems used by the deepagents CLI.\"\"\"\n"
},
{
"path": "libs/cli/deepagents_cli/integrations/sandbox_factory.py",
"chars": 22692,
"preview": "\"\"\"Sandbox lifecycle management with provider abstraction.\"\"\"\n\nfrom __future__ import annotations\n\nimport contextlib\nimp"
},
{
"path": "libs/cli/deepagents_cli/integrations/sandbox_provider.py",
"chars": 1820,
"preview": "\"\"\"Sandbox provider interface used by the deepagents CLI.\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nfrom ab"
},
{
"path": "libs/cli/deepagents_cli/local_context.py",
"chars": 19474,
"preview": "\"\"\"Middleware for injecting local context into system prompt.\n\nDetects git state, project structure, package managers, r"
},
{
"path": "libs/cli/deepagents_cli/main.py",
"chars": 55801,
"preview": "\"\"\"Main entry point and CLI loop for deepagents.\"\"\"\n\n# ruff: noqa: E402\n# Imports placed after warning filters to suppre"
},
{
"path": "libs/cli/deepagents_cli/mcp_tools.py",
"chars": 24735,
"preview": "\"\"\"MCP (Model Context Protocol) tools loader for deepagents CLI.\n\nThis module provides async functions to load and manag"
},
{
"path": "libs/cli/deepagents_cli/mcp_trust.py",
"chars": 4814,
"preview": "\"\"\"Trust store for project-level MCP server configurations.\n\nManages persistent approval of project-level MCP configs th"
},
{
"path": "libs/cli/deepagents_cli/media_utils.py",
"chars": 14881,
"preview": "\"\"\"Utilities for handling image and video media from clipboard and files.\"\"\"\n\nimport base64\nimport io\nimport logging\nimp"
},
{
"path": "libs/cli/deepagents_cli/model_config.py",
"chars": 55177,
"preview": "\"\"\"Model configuration management.\n\nHandles loading and saving model configuration from TOML files, providing a\nstructur"
},
{
"path": "libs/cli/deepagents_cli/non_interactive.py",
"chars": 34288,
"preview": "\"\"\"Non-interactive execution mode for deepagents CLI.\n\nProvides `run_non_interactive` which runs a single user task agai"
},
{
"path": "libs/cli/deepagents_cli/offload.py",
"chars": 12787,
"preview": "\"\"\"Business logic for the /offload command.\n\nExtracts the core offload workflow from the UI layer so it can be\ntested in"
},
{
"path": "libs/cli/deepagents_cli/output.py",
"chars": 2042,
"preview": "\"\"\"Machine-readable JSON output helpers for CLI subcommands.\n\nThis module deliberately stays stdlib-only so it can be im"
},
{
"path": "libs/cli/deepagents_cli/project_utils.py",
"chars": 5836,
"preview": "\"\"\"Utilities for project root detection and project-specific configuration.\"\"\"\n\nfrom __future__ import annotations\n\nimpo"
},
{
"path": "libs/cli/deepagents_cli/prompts.py",
"chars": 4114,
"preview": "# ruff: noqa: E501 # Long prompt strings\n\"\"\"Prompt constants for slash commands.\"\"\"\n\nREMEMBER_PROMPT = \"\"\"Review our co"
},
{
"path": "libs/cli/deepagents_cli/py.typed",
"chars": 0,
"preview": ""
},
{
"path": "libs/cli/deepagents_cli/remote_client.py",
"chars": 17266,
"preview": "\"\"\"Remote agent client — thin wrapper around LangGraph's `RemoteGraph`.\n\nDelegates streaming, state management, and SSE "
},
{
"path": "libs/cli/deepagents_cli/server.py",
"chars": 15879,
"preview": "\"\"\"LangGraph server lifecycle management for the CLI.\n\nHandles starting/stopping a `langgraph dev` server process and ge"
},
{
"path": "libs/cli/deepagents_cli/server_graph.py",
"chars": 7032,
"preview": "\"\"\"Server-side graph entry point for `langgraph dev`.\n\nThis module is referenced by the generated `langgraph.json` and e"
},
{
"path": "libs/cli/deepagents_cli/server_manager.py",
"chars": 11540,
"preview": "\"\"\"Server lifecycle orchestration for the CLI.\n\nProvides `start_server_and_get_agent` which handles the full flow of:\n\n1"
},
{
"path": "libs/cli/deepagents_cli/sessions.py",
"chars": 39824,
"preview": "\"\"\"Thread management using LangGraph's built-in checkpoint persistence.\"\"\"\n\nfrom __future__ import annotations\n\nimport a"
},
{
"path": "libs/cli/deepagents_cli/skills/__init__.py",
"chars": 437,
"preview": "\"\"\"Skills module for deepagents CLI.\n\nPublic API:\n- execute_skills_command: Execute skills subcommands (list/create/info"
},
{
"path": "libs/cli/deepagents_cli/skills/commands.py",
"chars": 35735,
"preview": "\"\"\"CLI commands for skill management.\n\nThese commands are registered with the CLI via main.py:\n- deepagents skills list "
},
{
"path": "libs/cli/deepagents_cli/skills/load.py",
"chars": 7668,
"preview": "\"\"\"Skill loader for CLI commands.\n\nThis module provides filesystem-based skill discovery for CLI operations\n(list, creat"
},
{
"path": "libs/cli/deepagents_cli/subagents.py",
"chars": 5015,
"preview": "\"\"\"Subagent loader for CLI.\n\nLoads custom subagent definitions from the filesystem. Subagents are defined\nas markdown fi"
},
{
"path": "libs/cli/deepagents_cli/system_prompt.md",
"chars": 11397,
"preview": "# Deep Agents CLI\n\nYou are a Deep Agent, an AI assistant running in {mode_description}. You help with tasks like coding,"
},
{
"path": "libs/cli/deepagents_cli/textual_adapter.py",
"chars": 60047,
"preview": "\"\"\"Textual UI adapter for agent execution.\"\"\"\n# This module has complex streaming logic ported from execution.py\n\nfrom _"
},
{
"path": "libs/cli/deepagents_cli/tool_display.py",
"chars": 11758,
"preview": "\"\"\"Formatting utilities for tool call display in the CLI.\n\nThis module handles rendering tool calls and tool messages fo"
},
{
"path": "libs/cli/deepagents_cli/tools.py",
"chars": 7754,
"preview": "\"\"\"Custom tools for the CLI agent.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Litera"
},
{
"path": "libs/cli/deepagents_cli/ui.py",
"chars": 14209,
"preview": "\"\"\"Help screens and argparse utilities for the CLI.\n\nThis module is imported at CLI startup to wire `-h` actions into th"
},
{
"path": "libs/cli/deepagents_cli/unicode_security.py",
"chars": 16144,
"preview": "\"\"\"Unicode security helpers for deceptive text and URL checks.\n\nThis module is intentionally lightweight so it can be im"
},
{
"path": "libs/cli/deepagents_cli/update_check.py",
"chars": 10636,
"preview": "\"\"\"Update lifecycle for `deepagents-cli`.\n\nHandles version checking against PyPI (with caching), install-method detectio"
},
{
"path": "libs/cli/deepagents_cli/widgets/__init__.py",
"chars": 231,
"preview": "\"\"\"Textual widgets for deepagents-cli.\n\nImport directly from submodules, e.g.:\n\n ```python\n from deepagents_cli.wi"
},
{
"path": "libs/cli/deepagents_cli/widgets/_links.py",
"chars": 2166,
"preview": "\"\"\"Shared link-click handling for Textual widgets.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport webbrow"
},
{
"path": "libs/cli/deepagents_cli/widgets/approval.py",
"chars": 17015,
"preview": "\"\"\"Approval widget for HITL - using standard Textual patterns.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing impor"
},
{
"path": "libs/cli/deepagents_cli/widgets/ask_user.py",
"chars": 14556,
"preview": "\"\"\"Ask user widget for interactive questions during agent execution.\"\"\"\n\nfrom __future__ import annotations\n\nimport logg"
},
{
"path": "libs/cli/deepagents_cli/widgets/autocomplete.py",
"chars": 22697,
"preview": "\"\"\"Autocomplete system for @ mentions and / commands.\n\nThis is a custom implementation that handles trigger-based comple"
},
{
"path": "libs/cli/deepagents_cli/widgets/chat_input.py",
"chars": 66768,
"preview": "\"\"\"Chat input widget for deepagents-cli with autocomplete and history support.\"\"\"\n\nfrom __future__ import annotations\n\ni"
},
{
"path": "libs/cli/deepagents_cli/widgets/diff.py",
"chars": 6956,
"preview": "\"\"\"Enhanced diff widget for displaying unified diffs.\"\"\"\n\nfrom __future__ import annotations\n\nimport re\nfrom typing impo"
},
{
"path": "libs/cli/deepagents_cli/widgets/history.py",
"chars": 6365,
"preview": "\"\"\"Command history manager for input persistence.\"\"\"\n\nfrom __future__ import annotations\n\nimport json\nimport logging\nfro"
},
{
"path": "libs/cli/deepagents_cli/widgets/loading.py",
"chars": 5000,
"preview": "\"\"\"Loading widget with animated spinner for agent activity.\"\"\"\n\nfrom __future__ import annotations\n\nfrom time import tim"
},
{
"path": "libs/cli/deepagents_cli/widgets/mcp_viewer.py",
"chars": 11651,
"preview": "\"\"\"Read-only MCP server and tool viewer modal.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING,"
},
{
"path": "libs/cli/deepagents_cli/widgets/message_store.py",
"chars": 19936,
"preview": "\"\"\"Message store for virtualized chat history.\n\nThis module provides data structures and management for message virtuali"
},
{
"path": "libs/cli/deepagents_cli/widgets/messages.py",
"chars": 49605,
"preview": "\"\"\"Message widgets for deepagents-cli.\"\"\"\n\nfrom __future__ import annotations\n\nimport ast\nimport json\nimport logging\nimp"
},
{
"path": "libs/cli/deepagents_cli/widgets/model_selector.py",
"chars": 31717,
"preview": "\"\"\"Interactive model selector screen for /model command.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nfrom typ"
},
{
"path": "libs/cli/deepagents_cli/widgets/status.py",
"chars": 11594,
"preview": "\"\"\"Status bar widget for deepagents-cli.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nfrom contextlib import s"
},
{
"path": "libs/cli/deepagents_cli/widgets/thread_selector.py",
"chars": 62384,
"preview": "\"\"\"Interactive thread selector screen for /threads command.\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimpor"
},
{
"path": "libs/cli/deepagents_cli/widgets/tool_renderers.py",
"chars": 3902,
"preview": "\"\"\"Tool renderers for approval widgets - registry pattern.\"\"\"\n\nfrom __future__ import annotations\n\nimport difflib\nfrom t"
},
{
"path": "libs/cli/deepagents_cli/widgets/tool_widgets.py",
"chars": 9007,
"preview": "\"\"\"Tool-specific approval widgets for HITL display.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHEC"
},
{
"path": "libs/cli/deepagents_cli/widgets/welcome.py",
"chars": 9867,
"preview": "\"\"\"Welcome banner widget for deepagents-cli.\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport random\nfrom t"
},
{
"path": "libs/cli/examples/skills/arxiv-search/SKILL.md",
"chars": 1007,
"preview": "---\nname: arxiv-search\ndescription: Searches arXiv for preprints and academic papers, retrieves abstracts, and filters b"
},
{
"path": "libs/cli/examples/skills/arxiv-search/arxiv_search.py",
"chars": 1673,
"preview": "#!/usr/bin/env python3\n\"\"\"arXiv Search.\n\nSearches the arXiv preprint repository for research papers.\n\"\"\"\n\nimport argpars"
},
{
"path": "libs/cli/examples/skills/langgraph-docs/SKILL.md",
"chars": 1130,
"preview": "---\nname: langgraph-docs\ndescription: Fetches and references LangGraph Python documentation to build stateful agents, cr"
},
{
"path": "libs/cli/examples/skills/skill-creator/SKILL.md",
"chars": 18901,
"preview": "---\nname: skill-creator\ndescription: \"Guide for creating effective skills that extend agent capabilities with specialize"
},
{
"path": "libs/cli/examples/skills/skill-creator/scripts/init_skill.py",
"chars": 11122,
"preview": "#!/usr/bin/env python3\n\"\"\"Skill Initializer - Creates a new skill from template.\n\nUsage:\n init_skill.py <skill-name> --p"
},
{
"path": "libs/cli/examples/skills/skill-creator/scripts/quick_validate.py",
"chars": 4818,
"preview": "#!/usr/bin/env python3\n\"\"\"Quick validation script for skills - minimal version.\n\nFor deepagents CLI, skills are located "
},
{
"path": "libs/cli/examples/skills/web-research/SKILL.md",
"chars": 3274,
"preview": "---\nname: web-research\ndescription: Searches multiple web sources, synthesizes findings, and produces cited research rep"
},
{
"path": "libs/cli/pyproject.toml",
"chars": 10160,
"preview": "[build-system]\nrequires = [\"hatchling\"]\nbuild-backend = \"hatchling.build\"\n\n[project]\nname = \"deepagents-cli\"\nversion = \""
},
{
"path": "libs/cli/scripts/check_imports.py",
"chars": 881,
"preview": "\"\"\"Check imports script.\n\nQuickly verify that a list of Python files can be loaded by the Python interpreter\nwithout rai"
},
{
"path": "libs/cli/scripts/install.sh",
"chars": 12218,
"preview": "#!/usr/bin/env bash\n# Install deepagents-cli via uv.\n#\n# Interactive mode detection, color logging, and optional tool in"
},
{
"path": "libs/cli/tests/README.md",
"chars": 247,
"preview": "# Deep Agents CLI Tests\n\n## API Keys\n\n### Required\n\n- **`ANTHROPIC_API_KEY`** - Required for integration tests that use "
},
{
"path": "libs/cli/tests/integration_tests/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "libs/cli/tests/integration_tests/benchmarks/__init__.py",
"chars": 0,
"preview": ""
}
]
// ... and 290 more files (download for full content)
About this extraction
This page contains the full source code of the langchain-ai/deepagents GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 490 files (12.8 MB), approximately 3.4M tokens, and a symbol index with 6736 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.