Showing preview only (6,860K chars total). Download the full file or copy to clipboard to get everything.
Repository: openai/openai-agents-python
Branch: main
Commit: 34ff8481bb40
Files: 870
Total size: 6.4 MB
Directory structure:
gitextract_tsktwg2s/
├── .agents/
│ └── skills/
│ ├── code-change-verification/
│ │ ├── SKILL.md
│ │ ├── agents/
│ │ │ └── openai.yaml
│ │ └── scripts/
│ │ ├── run.ps1
│ │ └── run.sh
│ ├── docs-sync/
│ │ ├── SKILL.md
│ │ ├── agents/
│ │ │ └── openai.yaml
│ │ └── references/
│ │ └── doc-coverage-checklist.md
│ ├── examples-auto-run/
│ │ ├── SKILL.md
│ │ ├── agents/
│ │ │ └── openai.yaml
│ │ └── scripts/
│ │ └── run.sh
│ ├── final-release-review/
│ │ ├── SKILL.md
│ │ ├── agents/
│ │ │ └── openai.yaml
│ │ ├── references/
│ │ │ └── review-checklist.md
│ │ └── scripts/
│ │ └── find_latest_release_tag.sh
│ ├── implementation-strategy/
│ │ ├── SKILL.md
│ │ └── agents/
│ │ └── openai.yaml
│ ├── openai-knowledge/
│ │ ├── SKILL.md
│ │ └── agents/
│ │ └── openai.yaml
│ ├── pr-draft-summary/
│ │ ├── SKILL.md
│ │ └── agents/
│ │ └── openai.yaml
│ └── test-coverage-improver/
│ ├── SKILL.md
│ └── agents/
│ └── openai.yaml
├── .github/
│ ├── ISSUE_TEMPLATE/
│ │ ├── bug_report.md
│ │ ├── feature_request.md
│ │ ├── model_provider.md
│ │ └── question.md
│ ├── PULL_REQUEST_TEMPLATE/
│ │ └── pull_request_template.md
│ ├── codex/
│ │ ├── prompts/
│ │ │ ├── pr-labels.md
│ │ │ └── release-review.md
│ │ └── schemas/
│ │ └── pr-labels.json
│ ├── dependabot.yml
│ ├── scripts/
│ │ ├── detect-changes.sh
│ │ ├── pr_labels.py
│ │ ├── run-asyncio-teardown-stability.sh
│ │ └── select-release-milestone.py
│ └── workflows/
│ ├── docs.yml
│ ├── issues.yml
│ ├── pr-labels.yml
│ ├── publish.yml
│ ├── release-pr-update.yml
│ ├── release-pr.yml
│ ├── release-tag.yml
│ ├── tests.yml
│ └── update-docs.yml
├── .gitignore
├── .prettierrc
├── .vscode/
│ ├── launch.json
│ └── settings.json
├── AGENTS.md
├── CLAUDE.md
├── LICENSE
├── Makefile
├── PLANS.md
├── README.md
├── docs/
│ ├── agents.md
│ ├── config.md
│ ├── context.md
│ ├── examples.md
│ ├── guardrails.md
│ ├── handoffs.md
│ ├── human_in_the_loop.md
│ ├── index.md
│ ├── ja/
│ │ ├── agents.md
│ │ ├── config.md
│ │ ├── context.md
│ │ ├── examples.md
│ │ ├── guardrails.md
│ │ ├── handoffs.md
│ │ ├── human_in_the_loop.md
│ │ ├── index.md
│ │ ├── mcp.md
│ │ ├── models/
│ │ │ ├── index.md
│ │ │ └── litellm.md
│ │ ├── multi_agent.md
│ │ ├── quickstart.md
│ │ ├── realtime/
│ │ │ ├── guide.md
│ │ │ ├── quickstart.md
│ │ │ └── transport.md
│ │ ├── release.md
│ │ ├── repl.md
│ │ ├── results.md
│ │ ├── running_agents.md
│ │ ├── sessions/
│ │ │ ├── advanced_sqlite_session.md
│ │ │ ├── encrypted_session.md
│ │ │ ├── index.md
│ │ │ └── sqlalchemy_session.md
│ │ ├── sessions.md
│ │ ├── streaming.md
│ │ ├── tools.md
│ │ ├── tracing.md
│ │ ├── usage.md
│ │ ├── visualization.md
│ │ └── voice/
│ │ ├── pipeline.md
│ │ ├── quickstart.md
│ │ └── tracing.md
│ ├── ko/
│ │ ├── agents.md
│ │ ├── config.md
│ │ ├── context.md
│ │ ├── examples.md
│ │ ├── guardrails.md
│ │ ├── handoffs.md
│ │ ├── human_in_the_loop.md
│ │ ├── index.md
│ │ ├── mcp.md
│ │ ├── models/
│ │ │ ├── index.md
│ │ │ └── litellm.md
│ │ ├── multi_agent.md
│ │ ├── quickstart.md
│ │ ├── realtime/
│ │ │ ├── guide.md
│ │ │ ├── quickstart.md
│ │ │ └── transport.md
│ │ ├── release.md
│ │ ├── repl.md
│ │ ├── results.md
│ │ ├── running_agents.md
│ │ ├── sessions/
│ │ │ ├── advanced_sqlite_session.md
│ │ │ ├── encrypted_session.md
│ │ │ ├── index.md
│ │ │ └── sqlalchemy_session.md
│ │ ├── sessions.md
│ │ ├── streaming.md
│ │ ├── tools.md
│ │ ├── tracing.md
│ │ ├── usage.md
│ │ ├── visualization.md
│ │ └── voice/
│ │ ├── pipeline.md
│ │ ├── quickstart.md
│ │ └── tracing.md
│ ├── llms-full.txt
│ ├── llms.txt
│ ├── mcp.md
│ ├── models/
│ │ ├── index.md
│ │ └── litellm.md
│ ├── multi_agent.md
│ ├── quickstart.md
│ ├── realtime/
│ │ ├── guide.md
│ │ ├── quickstart.md
│ │ └── transport.md
│ ├── ref/
│ │ ├── agent.md
│ │ ├── agent_output.md
│ │ ├── agent_tool_input.md
│ │ ├── agent_tool_state.md
│ │ ├── apply_diff.md
│ │ ├── computer.md
│ │ ├── editor.md
│ │ ├── exceptions.md
│ │ ├── extensions/
│ │ │ ├── experimental/
│ │ │ │ └── codex/
│ │ │ │ ├── codex.md
│ │ │ │ ├── codex_options.md
│ │ │ │ ├── codex_tool.md
│ │ │ │ ├── events.md
│ │ │ │ ├── exec.md
│ │ │ │ ├── items.md
│ │ │ │ ├── output_schema_file.md
│ │ │ │ ├── payloads.md
│ │ │ │ ├── thread.md
│ │ │ │ ├── thread_options.md
│ │ │ │ └── turn_options.md
│ │ │ ├── handoff_filters.md
│ │ │ ├── handoff_prompt.md
│ │ │ ├── litellm.md
│ │ │ ├── memory/
│ │ │ │ ├── advanced_sqlite_session.md
│ │ │ │ ├── async_sqlite_session.md
│ │ │ │ ├── dapr_session.md
│ │ │ │ ├── encrypt_session.md
│ │ │ │ ├── redis_session.md
│ │ │ │ └── sqlalchemy_session.md
│ │ │ ├── models/
│ │ │ │ ├── litellm_model.md
│ │ │ │ └── litellm_provider.md
│ │ │ ├── tool_output_trimmer.md
│ │ │ └── visualization.md
│ │ ├── function_schema.md
│ │ ├── guardrail.md
│ │ ├── handoffs/
│ │ │ └── history.md
│ │ ├── handoffs.md
│ │ ├── index.md
│ │ ├── items.md
│ │ ├── lifecycle.md
│ │ ├── logger.md
│ │ ├── mcp/
│ │ │ ├── manager.md
│ │ │ ├── server.md
│ │ │ └── util.md
│ │ ├── memory/
│ │ │ ├── openai_conversations_session.md
│ │ │ ├── openai_responses_compaction_session.md
│ │ │ ├── session.md
│ │ │ ├── session_settings.md
│ │ │ ├── sqlite_session.md
│ │ │ └── util.md
│ │ ├── memory.md
│ │ ├── model_settings.md
│ │ ├── models/
│ │ │ ├── chatcmpl_converter.md
│ │ │ ├── chatcmpl_helpers.md
│ │ │ ├── chatcmpl_stream_handler.md
│ │ │ ├── default_models.md
│ │ │ ├── fake_id.md
│ │ │ ├── interface.md
│ │ │ ├── multi_provider.md
│ │ │ ├── openai_chatcompletions.md
│ │ │ ├── openai_provider.md
│ │ │ └── openai_responses.md
│ │ ├── prompts.md
│ │ ├── realtime/
│ │ │ ├── agent.md
│ │ │ ├── audio_formats.md
│ │ │ ├── config.md
│ │ │ ├── events.md
│ │ │ ├── handoffs.md
│ │ │ ├── items.md
│ │ │ ├── model.md
│ │ │ ├── model_events.md
│ │ │ ├── model_inputs.md
│ │ │ ├── openai_realtime.md
│ │ │ ├── runner.md
│ │ │ └── session.md
│ │ ├── repl.md
│ │ ├── responses_websocket_session.md
│ │ ├── result.md
│ │ ├── retry.md
│ │ ├── run.md
│ │ ├── run_config.md
│ │ ├── run_context.md
│ │ ├── run_error_handlers.md
│ │ ├── run_internal/
│ │ │ ├── agent_runner_helpers.md
│ │ │ ├── approvals.md
│ │ │ ├── error_handlers.md
│ │ │ ├── guardrails.md
│ │ │ ├── items.md
│ │ │ ├── model_retry.md
│ │ │ ├── oai_conversation.md
│ │ │ ├── run_loop.md
│ │ │ ├── run_steps.md
│ │ │ ├── session_persistence.md
│ │ │ ├── streaming.md
│ │ │ ├── tool_actions.md
│ │ │ ├── tool_execution.md
│ │ │ ├── tool_planning.md
│ │ │ ├── tool_use_tracker.md
│ │ │ ├── turn_preparation.md
│ │ │ └── turn_resolution.md
│ │ ├── run_state.md
│ │ ├── stream_events.md
│ │ ├── strict_schema.md
│ │ ├── tool.md
│ │ ├── tool_context.md
│ │ ├── tool_guardrails.md
│ │ ├── tracing/
│ │ │ ├── config.md
│ │ │ ├── context.md
│ │ │ ├── create.md
│ │ │ ├── index.md
│ │ │ ├── logger.md
│ │ │ ├── model_tracing.md
│ │ │ ├── processor_interface.md
│ │ │ ├── processors.md
│ │ │ ├── provider.md
│ │ │ ├── scope.md
│ │ │ ├── setup.md
│ │ │ ├── span_data.md
│ │ │ ├── spans.md
│ │ │ ├── traces.md
│ │ │ └── util.md
│ │ ├── usage.md
│ │ ├── version.md
│ │ └── voice/
│ │ ├── events.md
│ │ ├── exceptions.md
│ │ ├── imports.md
│ │ ├── input.md
│ │ ├── model.md
│ │ ├── models/
│ │ │ ├── openai_model_provider.md
│ │ │ ├── openai_provider.md
│ │ │ ├── openai_stt.md
│ │ │ └── openai_tts.md
│ │ ├── pipeline.md
│ │ ├── pipeline_config.md
│ │ ├── result.md
│ │ ├── utils.md
│ │ └── workflow.md
│ ├── release.md
│ ├── repl.md
│ ├── results.md
│ ├── running_agents.md
│ ├── scripts/
│ │ ├── generate_ref_files.py
│ │ └── translate_docs.py
│ ├── sessions/
│ │ ├── advanced_sqlite_session.md
│ │ ├── encrypted_session.md
│ │ ├── index.md
│ │ └── sqlalchemy_session.md
│ ├── streaming.md
│ ├── stylesheets/
│ │ └── extra.css
│ ├── tools.md
│ ├── tracing.md
│ ├── usage.md
│ ├── visualization.md
│ ├── voice/
│ │ ├── pipeline.md
│ │ ├── quickstart.md
│ │ └── tracing.md
│ └── zh/
│ ├── agents.md
│ ├── config.md
│ ├── context.md
│ ├── examples.md
│ ├── guardrails.md
│ ├── handoffs.md
│ ├── human_in_the_loop.md
│ ├── index.md
│ ├── mcp.md
│ ├── models/
│ │ ├── index.md
│ │ └── litellm.md
│ ├── multi_agent.md
│ ├── quickstart.md
│ ├── realtime/
│ │ ├── guide.md
│ │ ├── quickstart.md
│ │ └── transport.md
│ ├── release.md
│ ├── repl.md
│ ├── results.md
│ ├── running_agents.md
│ ├── sessions/
│ │ ├── advanced_sqlite_session.md
│ │ ├── encrypted_session.md
│ │ ├── index.md
│ │ └── sqlalchemy_session.md
│ ├── sessions.md
│ ├── streaming.md
│ ├── tools.md
│ ├── tracing.md
│ ├── usage.md
│ ├── visualization.md
│ └── voice/
│ ├── pipeline.md
│ ├── quickstart.md
│ └── tracing.md
├── examples/
│ ├── __init__.py
│ ├── agent_patterns/
│ │ ├── README.md
│ │ ├── agents_as_tools.py
│ │ ├── agents_as_tools_conditional.py
│ │ ├── agents_as_tools_streaming.py
│ │ ├── agents_as_tools_structured.py
│ │ ├── deterministic.py
│ │ ├── forcing_tool_use.py
│ │ ├── human_in_the_loop.py
│ │ ├── human_in_the_loop_custom_rejection.py
│ │ ├── human_in_the_loop_stream.py
│ │ ├── input_guardrails.py
│ │ ├── llm_as_a_judge.py
│ │ ├── output_guardrails.py
│ │ ├── parallelization.py
│ │ ├── routing.py
│ │ └── streaming_guardrails.py
│ ├── auto_mode.py
│ ├── basic/
│ │ ├── agent_lifecycle_example.py
│ │ ├── dynamic_system_prompt.py
│ │ ├── hello_world.py
│ │ ├── hello_world_gpt_5.py
│ │ ├── hello_world_gpt_oss.py
│ │ ├── hello_world_jupyter.ipynb
│ │ ├── image_tool_output.py
│ │ ├── lifecycle_example.py
│ │ ├── local_file.py
│ │ ├── local_image.py
│ │ ├── non_strict_output_type.py
│ │ ├── previous_response_id.py
│ │ ├── prompt_template.py
│ │ ├── remote_image.py
│ │ ├── remote_pdf.py
│ │ ├── retry.py
│ │ ├── retry_litellm.py
│ │ ├── stream_function_call_args.py
│ │ ├── stream_items.py
│ │ ├── stream_text.py
│ │ ├── stream_ws.py
│ │ ├── tool_guardrails.py
│ │ ├── tools.py
│ │ └── usage_tracking.py
│ ├── customer_service/
│ │ └── main.py
│ ├── financial_research_agent/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── agents/
│ │ │ ├── __init__.py
│ │ │ ├── financials_agent.py
│ │ │ ├── planner_agent.py
│ │ │ ├── risk_agent.py
│ │ │ ├── search_agent.py
│ │ │ ├── verifier_agent.py
│ │ │ └── writer_agent.py
│ │ ├── main.py
│ │ ├── manager.py
│ │ └── printer.py
│ ├── handoffs/
│ │ ├── message_filter.py
│ │ └── message_filter_streaming.py
│ ├── hosted_mcp/
│ │ ├── __init__.py
│ │ ├── connectors.py
│ │ ├── human_in_the_loop.py
│ │ ├── on_approval.py
│ │ └── simple.py
│ ├── mcp/
│ │ ├── filesystem_example/
│ │ │ ├── README.md
│ │ │ ├── main.py
│ │ │ └── sample_files/
│ │ │ ├── favorite_books.txt
│ │ │ ├── favorite_cities.txt
│ │ │ └── favorite_songs.txt
│ │ ├── get_all_mcp_tools_example/
│ │ │ ├── README.md
│ │ │ ├── main.py
│ │ │ └── sample_files/
│ │ │ ├── books.txt
│ │ │ └── favorite_songs.txt
│ │ ├── git_example/
│ │ │ ├── README.md
│ │ │ └── main.py
│ │ ├── manager_example/
│ │ │ ├── README.md
│ │ │ ├── app.py
│ │ │ └── mcp_server.py
│ │ ├── prompt_server/
│ │ │ ├── README.md
│ │ │ ├── main.py
│ │ │ └── server.py
│ │ ├── sse_example/
│ │ │ ├── README.md
│ │ │ ├── main.py
│ │ │ └── server.py
│ │ ├── sse_remote_example/
│ │ │ ├── README.md
│ │ │ └── main.py
│ │ ├── streamable_http_remote_example/
│ │ │ ├── README.md
│ │ │ └── main.py
│ │ ├── streamablehttp_custom_client_example/
│ │ │ ├── README.md
│ │ │ ├── main.py
│ │ │ └── server.py
│ │ ├── streamablehttp_example/
│ │ │ ├── README.md
│ │ │ ├── main.py
│ │ │ └── server.py
│ │ └── tool_filter_example/
│ │ ├── README.md
│ │ ├── main.py
│ │ └── sample_files/
│ │ ├── books.txt
│ │ └── favorite_songs.txt
│ ├── memory/
│ │ ├── advanced_sqlite_session_example.py
│ │ ├── compaction_session_example.py
│ │ ├── compaction_session_stateless_example.py
│ │ ├── dapr_session_example.py
│ │ ├── encrypted_session_example.py
│ │ ├── file_hitl_example.py
│ │ ├── file_session.py
│ │ ├── hitl_session_scenario.py
│ │ ├── memory_session_hitl_example.py
│ │ ├── openai_session_example.py
│ │ ├── openai_session_hitl_example.py
│ │ ├── redis_session_example.py
│ │ ├── sqlalchemy_session_example.py
│ │ └── sqlite_session_example.py
│ ├── model_providers/
│ │ ├── README.md
│ │ ├── custom_example_agent.py
│ │ ├── custom_example_global.py
│ │ ├── custom_example_provider.py
│ │ ├── litellm_auto.py
│ │ └── litellm_provider.py
│ ├── realtime/
│ │ ├── app/
│ │ │ ├── README.md
│ │ │ ├── agent.py
│ │ │ ├── server.py
│ │ │ └── static/
│ │ │ ├── app.js
│ │ │ ├── audio-playback.worklet.js
│ │ │ ├── audio-recorder.worklet.js
│ │ │ └── index.html
│ │ ├── cli/
│ │ │ └── demo.py
│ │ ├── twilio/
│ │ │ ├── README.md
│ │ │ ├── __init__.py
│ │ │ ├── requirements.txt
│ │ │ ├── server.py
│ │ │ └── twilio_handler.py
│ │ └── twilio_sip/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── agents.py
│ │ ├── requirements.txt
│ │ └── server.py
│ ├── reasoning_content/
│ │ ├── __init__.py
│ │ ├── gpt_oss_stream.py
│ │ ├── main.py
│ │ └── runner_example.py
│ ├── research_bot/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── agents/
│ │ │ ├── __init__.py
│ │ │ ├── planner_agent.py
│ │ │ ├── search_agent.py
│ │ │ └── writer_agent.py
│ │ ├── main.py
│ │ ├── manager.py
│ │ ├── printer.py
│ │ └── sample_outputs/
│ │ ├── product_recs.md
│ │ ├── product_recs.txt
│ │ ├── vacation.md
│ │ └── vacation.txt
│ ├── run_examples.py
│ ├── tools/
│ │ ├── apply_patch.py
│ │ ├── code_interpreter.py
│ │ ├── codex.py
│ │ ├── codex_same_thread.py
│ │ ├── computer_use.py
│ │ ├── container_shell_inline_skill.py
│ │ ├── container_shell_skill_reference.py
│ │ ├── file_search.py
│ │ ├── image_generator.py
│ │ ├── local_shell_skill.py
│ │ ├── shell.py
│ │ ├── shell_human_in_the_loop.py
│ │ ├── skills/
│ │ │ └── csv-workbench/
│ │ │ ├── SKILL.md
│ │ │ └── playbook.md
│ │ ├── tool_search.py
│ │ ├── web_search.py
│ │ └── web_search_filters.py
│ └── voice/
│ ├── __init__.py
│ ├── static/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── main.py
│ │ └── util.py
│ └── streamed/
│ ├── README.md
│ ├── __init__.py
│ ├── main.py
│ └── my_workflow.py
├── mkdocs.yml
├── pyproject.toml
├── pyrightconfig.json
├── src/
│ └── agents/
│ ├── __init__.py
│ ├── _config.py
│ ├── _debug.py
│ ├── _mcp_tool_metadata.py
│ ├── _tool_identity.py
│ ├── agent.py
│ ├── agent_output.py
│ ├── agent_tool_input.py
│ ├── agent_tool_state.py
│ ├── apply_diff.py
│ ├── computer.py
│ ├── editor.py
│ ├── exceptions.py
│ ├── extensions/
│ │ ├── __init__.py
│ │ ├── experimental/
│ │ │ ├── __init__.py
│ │ │ └── codex/
│ │ │ ├── __init__.py
│ │ │ ├── codex.py
│ │ │ ├── codex_options.py
│ │ │ ├── codex_tool.py
│ │ │ ├── events.py
│ │ │ ├── exec.py
│ │ │ ├── items.py
│ │ │ ├── output_schema_file.py
│ │ │ ├── payloads.py
│ │ │ ├── thread.py
│ │ │ ├── thread_options.py
│ │ │ └── turn_options.py
│ │ ├── handoff_filters.py
│ │ ├── handoff_prompt.py
│ │ ├── memory/
│ │ │ ├── __init__.py
│ │ │ ├── advanced_sqlite_session.py
│ │ │ ├── async_sqlite_session.py
│ │ │ ├── dapr_session.py
│ │ │ ├── encrypt_session.py
│ │ │ ├── redis_session.py
│ │ │ └── sqlalchemy_session.py
│ │ ├── models/
│ │ │ ├── __init__.py
│ │ │ ├── litellm_model.py
│ │ │ └── litellm_provider.py
│ │ ├── tool_output_trimmer.py
│ │ └── visualization.py
│ ├── function_schema.py
│ ├── guardrail.py
│ ├── handoffs/
│ │ ├── __init__.py
│ │ └── history.py
│ ├── items.py
│ ├── lifecycle.py
│ ├── logger.py
│ ├── mcp/
│ │ ├── __init__.py
│ │ ├── manager.py
│ │ ├── server.py
│ │ └── util.py
│ ├── memory/
│ │ ├── __init__.py
│ │ ├── openai_conversations_session.py
│ │ ├── openai_responses_compaction_session.py
│ │ ├── session.py
│ │ ├── session_settings.py
│ │ ├── sqlite_session.py
│ │ └── util.py
│ ├── model_settings.py
│ ├── models/
│ │ ├── __init__.py
│ │ ├── _openai_retry.py
│ │ ├── _openai_shared.py
│ │ ├── _retry_runtime.py
│ │ ├── chatcmpl_converter.py
│ │ ├── chatcmpl_helpers.py
│ │ ├── chatcmpl_stream_handler.py
│ │ ├── default_models.py
│ │ ├── fake_id.py
│ │ ├── interface.py
│ │ ├── multi_provider.py
│ │ ├── openai_chatcompletions.py
│ │ ├── openai_provider.py
│ │ ├── openai_responses.py
│ │ └── reasoning_content_replay.py
│ ├── prompts.py
│ ├── py.typed
│ ├── realtime/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── _default_tracker.py
│ │ ├── _util.py
│ │ ├── agent.py
│ │ ├── audio_formats.py
│ │ ├── config.py
│ │ ├── events.py
│ │ ├── handoffs.py
│ │ ├── items.py
│ │ ├── model.py
│ │ ├── model_events.py
│ │ ├── model_inputs.py
│ │ ├── openai_realtime.py
│ │ ├── runner.py
│ │ └── session.py
│ ├── repl.py
│ ├── responses_websocket_session.py
│ ├── result.py
│ ├── retry.py
│ ├── run.py
│ ├── run_config.py
│ ├── run_context.py
│ ├── run_error_handlers.py
│ ├── run_internal/
│ │ ├── __init__.py
│ │ ├── _asyncio_progress.py
│ │ ├── agent_runner_helpers.py
│ │ ├── approvals.py
│ │ ├── error_handlers.py
│ │ ├── guardrails.py
│ │ ├── items.py
│ │ ├── model_retry.py
│ │ ├── oai_conversation.py
│ │ ├── run_loop.py
│ │ ├── run_steps.py
│ │ ├── session_persistence.py
│ │ ├── streaming.py
│ │ ├── tool_actions.py
│ │ ├── tool_execution.py
│ │ ├── tool_planning.py
│ │ ├── tool_use_tracker.py
│ │ ├── turn_preparation.py
│ │ └── turn_resolution.py
│ ├── run_state.py
│ ├── stream_events.py
│ ├── strict_schema.py
│ ├── tool.py
│ ├── tool_context.py
│ ├── tool_guardrails.py
│ ├── tracing/
│ │ ├── __init__.py
│ │ ├── config.py
│ │ ├── context.py
│ │ ├── create.py
│ │ ├── logger.py
│ │ ├── model_tracing.py
│ │ ├── processor_interface.py
│ │ ├── processors.py
│ │ ├── provider.py
│ │ ├── scope.py
│ │ ├── setup.py
│ │ ├── span_data.py
│ │ ├── spans.py
│ │ ├── traces.py
│ │ └── util.py
│ ├── usage.py
│ ├── util/
│ │ ├── __init__.py
│ │ ├── _approvals.py
│ │ ├── _coro.py
│ │ ├── _error_tracing.py
│ │ ├── _json.py
│ │ ├── _pretty_print.py
│ │ ├── _transforms.py
│ │ └── _types.py
│ ├── version.py
│ └── voice/
│ ├── __init__.py
│ ├── events.py
│ ├── exceptions.py
│ ├── imports.py
│ ├── input.py
│ ├── model.py
│ ├── models/
│ │ ├── __init__.py
│ │ ├── openai_model_provider.py
│ │ ├── openai_stt.py
│ │ └── openai_tts.py
│ ├── pipeline.py
│ ├── pipeline_config.py
│ ├── result.py
│ ├── utils.py
│ └── workflow.py
└── tests/
├── README.md
├── __init__.py
├── conftest.py
├── extensions/
│ ├── experiemental/
│ │ └── codex/
│ │ ├── test_codex_exec_thread.py
│ │ └── test_codex_tool.py
│ ├── memory/
│ │ ├── test_advanced_sqlite_session.py
│ │ ├── test_async_sqlite_session.py
│ │ ├── test_dapr_redis_integration.py
│ │ ├── test_dapr_session.py
│ │ ├── test_encrypt_session.py
│ │ ├── test_redis_session.py
│ │ └── test_sqlalchemy_session.py
│ └── test_tool_output_trimmer.py
├── fake_model.py
├── fastapi/
│ ├── __init__.py
│ ├── streaming_app.py
│ └── test_streaming_context.py
├── mcp/
│ ├── __init__.py
│ ├── helpers.py
│ ├── test_caching.py
│ ├── test_client_session_retries.py
│ ├── test_connect_disconnect.py
│ ├── test_mcp_approval.py
│ ├── test_mcp_auth_params.py
│ ├── test_mcp_server_manager.py
│ ├── test_mcp_tracing.py
│ ├── test_mcp_util.py
│ ├── test_message_handler.py
│ ├── test_prompt_server.py
│ ├── test_runner_calls_mcp.py
│ ├── test_server_errors.py
│ ├── test_streamable_http_client_factory.py
│ ├── test_streamable_http_session_id.py
│ └── test_tool_filtering.py
├── memory/
│ └── test_openai_responses_compaction_session.py
├── model_settings/
│ └── test_serialization.py
├── models/
│ ├── __init__.py
│ ├── test_deepseek_reasoning_content.py
│ ├── test_default_models.py
│ ├── test_kwargs_functionality.py
│ ├── test_litellm_chatcompletions_stream.py
│ ├── test_litellm_extra_body.py
│ ├── test_litellm_logging_patch.py
│ ├── test_litellm_user_agent.py
│ ├── test_map.py
│ └── test_reasoning_content_replay_hook.py
├── realtime/
│ ├── __init__.py
│ ├── test_agent.py
│ ├── test_audio_formats_unit.py
│ ├── test_conversion_helpers.py
│ ├── test_ga_session_update_normalization.py
│ ├── test_item_parsing.py
│ ├── test_model_events.py
│ ├── test_openai_realtime.py
│ ├── test_openai_realtime_conversions.py
│ ├── test_openai_realtime_sip_model.py
│ ├── test_playback_tracker.py
│ ├── test_playback_tracker_manual_unit.py
│ ├── test_realtime_handoffs.py
│ ├── test_realtime_model_settings.py
│ ├── test_runner.py
│ ├── test_session.py
│ ├── test_session_payload_and_formats.py
│ ├── test_tracing.py
│ └── test_twilio_sip_server.py
├── test_agent_as_tool.py
├── test_agent_clone_shallow_copy.py
├── test_agent_config.py
├── test_agent_hooks.py
├── test_agent_instructions_signature.py
├── test_agent_llm_hooks.py
├── test_agent_memory_leak.py
├── test_agent_prompt.py
├── test_agent_runner.py
├── test_agent_runner_streamed.py
├── test_agent_runner_sync.py
├── test_agent_tool_input.py
├── test_agent_tool_state.py
├── test_agent_tracing.py
├── test_agents_logging.py
├── test_anthropic_thinking_blocks.py
├── test_apply_diff.py
├── test_apply_diff_helpers.py
├── test_apply_patch_tool.py
├── test_asyncio_progress.py
├── test_call_model_input_filter.py
├── test_call_model_input_filter_unit.py
├── test_cancel_streaming.py
├── test_computer_action.py
├── test_computer_tool_lifecycle.py
├── test_config.py
├── test_debug.py
├── test_doc_parsing.py
├── test_example_workflows.py
├── test_extended_thinking_message_order.py
├── test_extension_filters.py
├── test_extra_headers.py
├── test_function_schema.py
├── test_function_tool.py
├── test_function_tool_decorator.py
├── test_gemini_thought_signatures.py
├── test_gemini_thought_signatures_stream.py
├── test_global_hooks.py
├── test_guardrails.py
├── test_handoff_history_duplication.py
├── test_handoff_prompt.py
├── test_handoff_tool.py
├── test_hitl_error_scenarios.py
├── test_hitl_session_scenario.py
├── test_hitl_utils.py
├── test_items_helpers.py
├── test_local_shell_tool.py
├── test_logprobs.py
├── test_max_turns.py
├── test_model_payload_iterators.py
├── test_model_retry.py
├── test_openai_chatcompletions.py
├── test_openai_chatcompletions_converter.py
├── test_openai_chatcompletions_stream.py
├── test_openai_conversations_session.py
├── test_openai_responses.py
├── test_openai_responses_converter.py
├── test_output_tool.py
├── test_pr_labels.py
├── test_pretty_print.py
├── test_process_model_response.py
├── test_reasoning_content.py
├── test_remove_openai_responses_api_incompatible_fields.py
├── test_repl.py
├── test_responses.py
├── test_responses_tracing.py
├── test_responses_websocket_session.py
├── test_result_cast.py
├── test_run.py
├── test_run_config.py
├── test_run_context_approvals.py
├── test_run_context_wrapper.py
├── test_run_error_details.py
├── test_run_hooks.py
├── test_run_impl_resume_paths.py
├── test_run_internal_error_handlers.py
├── test_run_internal_items.py
├── test_run_state.py
├── test_run_step_execution.py
├── test_run_step_processing.py
├── test_runner_guardrail_resume.py
├── test_server_conversation_tracker.py
├── test_session.py
├── test_session_exceptions.py
├── test_session_limit.py
├── test_shell_call_serialization.py
├── test_shell_tool.py
├── test_soft_cancel.py
├── test_source_compat_constructors.py
├── test_stream_events.py
├── test_stream_input_guardrail_timing.py
├── test_streaming_logging.py
├── test_streaming_tool_call_arguments.py
├── test_strict_schema.py
├── test_strict_schema_oneof.py
├── test_tool_choice_reset.py
├── test_tool_context.py
├── test_tool_converter.py
├── test_tool_guardrails.py
├── test_tool_metadata.py
├── test_tool_output_conversion.py
├── test_tool_use_behavior.py
├── test_tool_use_tracker.py
├── test_trace_processor.py
├── test_tracing.py
├── test_tracing_errors.py
├── test_tracing_errors_streamed.py
├── test_tracing_provider_safe_debug.py
├── test_usage.py
├── test_visualization.py
├── testing_processor.py
├── tracing/
│ ├── test_import_side_effects.py
│ ├── test_logger.py
│ ├── test_processor_api_key.py
│ ├── test_set_api_key_fix.py
│ ├── test_setup.py
│ ├── test_trace_context.py
│ ├── test_traces_impl.py
│ └── test_tracing_env_disable.py
├── utils/
│ ├── factories.py
│ ├── hitl.py
│ ├── simple_session.py
│ ├── test_json.py
│ └── test_simple_session.py
└── voice/
├── __init__.py
├── fake_models.py
├── helpers.py
├── test_input.py
├── test_openai_stt.py
├── test_openai_tts.py
├── test_pipeline.py
└── test_workflow.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .agents/skills/code-change-verification/SKILL.md
================================================
---
name: code-change-verification
description: Run the mandatory verification stack when changes affect runtime code, tests, or build/test behavior in the OpenAI Agents Python repository.
---
# Code Change Verification
## Overview
Ensure work is only marked complete after formatting, linting, type checking, and tests pass. Use this skill when changes affect runtime code, tests, or build/test configuration. You can skip it for docs-only or repository metadata unless a user asks for the full stack.
## Quick start
1. Keep this skill at `./.agents/skills/code-change-verification` so it loads automatically for the repository.
2. macOS/Linux: `bash .agents/skills/code-change-verification/scripts/run.sh`.
3. Windows: `powershell -ExecutionPolicy Bypass -File .agents/skills/code-change-verification/scripts/run.ps1`.
4. If any command fails, fix the issue, rerun the script, and report the failing output.
5. Confirm completion only when all commands succeed with no remaining issues.
## Manual workflow
- If dependencies are not installed or have changed, run `make sync` first to install dev requirements via `uv`.
- Run from the repository root in this order: `make format`, `make lint`, `make typecheck`, `make tests`.
- Do not skip steps; stop and fix issues immediately when a command fails.
- Re-run the full stack after applying fixes so the commands execute in the required order.
## Resources
### scripts/run.sh
- Executes the full verification sequence with fail-fast semantics from the repository root. Prefer this entry point to ensure the required commands run in the correct order.
### scripts/run.ps1
- Windows-friendly wrapper that runs the same verification sequence with fail-fast semantics. Use from PowerShell with execution policy bypass if required by your environment.
================================================
FILE: .agents/skills/code-change-verification/agents/openai.yaml
================================================
# Skill interface metadata consumed by the agent runtime.
interface:
  # Human-readable name shown when the skill is listed or invoked.
  display_name: "Code Change Verification"
  # One-line summary displayed alongside the skill name.
  short_description: "Run the required local verification stack"
  # Prompt inserted when the user selects this skill without typing their own.
  default_prompt: "Use $code-change-verification to run the required local verification stack and report any failures."
================================================
FILE: .agents/skills/code-change-verification/scripts/run.ps1
================================================
# Run the full local verification stack (format, lint, typecheck, tests)
# from the repository root, stopping at the first failing make target and
# propagating that target's exit code.
Set-StrictMode -Version Latest
$ErrorActionPreference = "Stop"

# Resolve the repository root: prefer `git rev-parse`, falling back to a fixed
# relative path from this script's directory
# (.agents/skills/code-change-verification/scripts -> repo root is four levels up).
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition
$repoRoot = $null
try {
    $repoRoot = (& git -C $scriptDir rev-parse --show-toplevel 2>$null)
} catch {
    # git is missing or this is not a work tree; use the relative-path fallback.
    $repoRoot = $null
}
if (-not $repoRoot) {
    $repoRoot = Resolve-Path (Join-Path $scriptDir "..\..\..\..")
}
Set-Location $repoRoot

# Run one `make` target; on failure, report and exit with that target's code.
function Invoke-MakeStep {
    param(
        [Parameter(Mandatory = $true)][string]$Step
    )
    Write-Host "Running make $Step..."
    & make $Step
    if ($LASTEXITCODE -ne 0) {
        # BUGFIX: with $ErrorActionPreference = "Stop", a plain Write-Error is
        # promoted to a terminating error, so the `exit $LASTEXITCODE` below was
        # unreachable and the script always exited with code 1. Forcing
        # -ErrorAction Continue keeps the message on the error stream while
        # letting us propagate the real exit code.
        Write-Error "code-change-verification: make $Step failed with exit code $LASTEXITCODE." -ErrorAction Continue
        exit $LASTEXITCODE
    }
}

Invoke-MakeStep -Step "format"
Invoke-MakeStep -Step "lint"
Invoke-MakeStep -Step "typecheck"
Invoke-MakeStep -Step "tests"
Write-Host "code-change-verification: all commands passed."
================================================
FILE: .agents/skills/code-change-verification/scripts/run.sh
================================================
#!/usr/bin/env bash
# Run the full local verification stack (format, lint, typecheck, tests) from
# the repository root, stopping at the first failing command.
set -euo pipefail

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Prefer git to locate the repository root; fall back to a fixed relative path
# when git is unavailable or the script lives outside a work tree.
repo_root=""
if command -v git >/dev/null 2>&1; then
  repo_root="$(git -C "${script_dir}" rev-parse --show-toplevel 2>/dev/null || true)"
fi
if [[ -z "${repo_root}" ]]; then
  repo_root="$(cd "${script_dir}/../../../.." && pwd)"
fi
cd "${repo_root}"

# Required verification order; `set -e` aborts on the first failure.
for step in format lint typecheck tests; do
  echo "Running make ${step}..."
  make "${step}"
done
echo "code-change-verification: all commands passed."
================================================
FILE: .agents/skills/docs-sync/SKILL.md
================================================
---
name: docs-sync
description: Analyze main branch implementation and configuration to find missing, incorrect, or outdated documentation in docs/. Use when asked to audit doc coverage, sync docs with code, or propose doc updates/structure changes. Only update English docs under docs/** and never touch translated docs under docs/ja, docs/ko, or docs/zh. Provide a report and ask for approval before editing docs.
---
# Docs Sync
## Overview
Identify doc coverage gaps and inaccuracies by comparing main branch features and configuration options against the current docs structure, then propose targeted improvements.
## Workflow
1. Confirm scope and base branch
- Identify the current branch and default branch (usually `main`).
- Prefer analyzing the current branch to keep work aligned with in-flight changes.
- If the current branch is not `main`, analyze only the diff vs `main` to scope doc updates.
- Avoid switching branches if it would disrupt local changes; use `git show main:<path>` or `git worktree add` when needed.
2. Build a feature inventory from the selected scope
- If on `main`: inventory the full surface area and review docs comprehensively.
- If not on `main`: inventory only changes vs `main` (feature additions/changes/removals).
- Focus on user-facing behavior: public exports, configuration options, environment variables, CLI commands, default values, and documented runtime behaviors.
- Capture evidence for each item (file path + symbol/setting).
- Use targeted search to find option types and feature flags (for example: `rg "Settings"`, `rg "Config"`, `rg "os.environ"`, `rg "OPENAI_"`).
- When the topic involves OpenAI platform features, invoke `$openai-knowledge` to pull current details from the OpenAI Developer Docs MCP server instead of guessing, while treating the SDK source code as the source of truth when discrepancies appear.
3. Doc-first pass: review existing pages
- Walk each relevant page under `docs/` (excluding `docs/ja`, `docs/ko`, and `docs/zh`).
- Identify missing mentions of important, supported options (opt-in flags, env vars), customization points, or new features from `src/agents/` and `examples/`.
- Propose additions where users would reasonably expect to find them on that page.
4. Code-first pass: map features to docs
- Review the current docs information architecture under `docs/` and `mkdocs.yml`.
- Determine the best page/section for each feature based on existing patterns and the API reference structure under `docs/ref`.
- Identify features that lack any doc page or have a page but no corresponding content.
- Note when a structural adjustment would improve discoverability.
- When improving `docs/ref/*` pages, treat the corresponding docstrings/comments in `src/` as the source of truth. Prefer updating those code comments so regenerated reference docs stay correct, instead of hand-editing the generated pages.
5. Detect gaps and inaccuracies
- **Missing**: features/configs present in main but absent in docs.
- **Incorrect/outdated**: names, defaults, or behaviors that diverge from main.
- **Structural issues** (optional): pages overloaded, missing overviews, or mis-grouped topics.
6. Produce a Docs Sync Report and ask for approval
- Provide a clear report with evidence, suggested doc locations, and proposed edits.
- Ask the user whether to proceed with doc updates.
7. If approved, apply changes (English only)
- Edit only English docs in `docs/**`.
- Do **not** edit `docs/ja`, `docs/ko`, or `docs/zh`.
- Keep changes aligned with the existing docs style and navigation.
- Update `mkdocs.yml` when adding or renaming pages.
- Build docs with `make build-docs` after edits to verify the docs site still builds.
## Output format
Use this template when reporting findings:
Docs Sync Report
- Doc-first findings
- Page + missing content -> evidence + suggested insertion point
- Code-first gaps
- Feature + evidence -> suggested doc page/section (or missing page)
- Incorrect or outdated docs
- Doc file + issue + correct info + evidence
- Structural suggestions (optional)
- Proposed change + rationale
- Proposed edits
- Doc file -> concise change summary
- Questions for the user
## References
- `references/doc-coverage-checklist.md`
================================================
FILE: .agents/skills/docs-sync/agents/openai.yaml
================================================
# Skill metadata surfaced in the agent UI for the docs-sync skill.
interface:
  display_name: "Docs Sync"
  short_description: "Audit docs coverage and propose targeted updates"
  default_prompt: "Use $docs-sync to audit the current branch against docs/ and propose targeted documentation updates."
================================================
FILE: .agents/skills/docs-sync/references/doc-coverage-checklist.md
================================================
# Doc Coverage Checklist
Use this checklist to scan the selected scope (main = comprehensive, or current-branch diff) and validate documentation coverage.
## Feature inventory targets
- Public exports: classes, functions, types, and module entry points.
- Configuration options: `*Settings` types, default config objects, and builder patterns.
- Environment variables or runtime flags.
- CLI commands, scripts, and example entry points that define supported usage.
- User-facing behaviors: retry, timeouts, streaming, errors, logging, telemetry, and data handling.
- Deprecations, removals, or renamed settings.
## Doc-first pass (page-by-page)
- Review each relevant English page (excluding `docs/ja`, `docs/ko`, and `docs/zh`).
- Look for missing opt-in flags, env vars, or customization options that the page implies.
- Add new features that belong on that page based on user intent and navigation.
## Code-first pass (feature inventory)
- Map features to the closest existing page based on the docs navigation in `mkdocs.yml`.
- Prefer updating existing pages over creating new ones unless the topic is clearly new.
- Use conceptual pages for cross-cutting concerns (auth, errors, streaming, tracing, tools).
- Keep quick-start flows minimal; move advanced details into deeper pages.
## Evidence capture
- Record the main-branch file path and symbol/setting name.
- Note defaults or behavior-critical details for accuracy checks.
- Avoid large code dumps; a short identifier is enough.
## Red flags for outdated or incorrect docs
- Option names/types no longer exist or differ from code.
- Default values or allowed ranges do not match implementation.
- Features removed in code but still documented.
- New behaviors introduced without corresponding docs updates.
## When to propose structural changes
- A page mixes unrelated audiences (quick-start + deep reference) without clear separation.
- Multiple pages duplicate the same concept without cross-links.
- New feature areas have no obvious home in the nav structure.
## Diff mode guidance (current branch vs main)
- Focus only on changed behavior: new exports/options, modified defaults, removed features, or renamed settings.
- Use `git diff main...HEAD` (or equivalent) to constrain analysis.
- Document removals explicitly so docs can be pruned if needed.
## Patch guidance
- Keep edits scoped and aligned with existing tone and format.
- Update cross-links when moving or renaming sections.
- Leave translated docs untouched; English-only updates.
================================================
FILE: .agents/skills/examples-auto-run/SKILL.md
================================================
---
name: examples-auto-run
description: Run python examples in auto mode with logging, rerun helpers, and background control.
---
# examples-auto-run
## What it does
- Runs `uv run examples/run_examples.py` with:
- `EXAMPLES_INTERACTIVE_MODE=auto` (auto-input/auto-approve).
- Per-example logs under `.tmp/examples-start-logs/`.
- Main summary log path passed via `--main-log` (also under `.tmp/examples-start-logs/`).
- Generates a rerun list of failures at `.tmp/examples-rerun.txt` when `--write-rerun` is set.
- Provides start/stop/status/logs/tail/collect/rerun helpers via `run.sh`.
- Background option keeps the process running with a pidfile; `stop` cleans it up.
## Usage
```bash
# Start (auto mode; interactive included by default)
.agents/skills/examples-auto-run/scripts/run.sh start [extra args to run_examples.py]
# Examples:
.agents/skills/examples-auto-run/scripts/run.sh start --filter basic
.agents/skills/examples-auto-run/scripts/run.sh start --include-server --include-audio
# Check status
.agents/skills/examples-auto-run/scripts/run.sh status
# Stop running job
.agents/skills/examples-auto-run/scripts/run.sh stop
# List logs
.agents/skills/examples-auto-run/scripts/run.sh logs
# Tail latest log (or specify one)
.agents/skills/examples-auto-run/scripts/run.sh tail
.agents/skills/examples-auto-run/scripts/run.sh tail main_20260113-123000.log
# Collect rerun list from a main log (defaults to latest main_*.log)
.agents/skills/examples-auto-run/scripts/run.sh collect
# Rerun only failed entries from rerun file (auto mode)
.agents/skills/examples-auto-run/scripts/run.sh rerun
```
## Defaults (overridable via env)
- `EXAMPLES_INTERACTIVE_MODE=auto`
- `EXAMPLES_INCLUDE_INTERACTIVE=1`
- `EXAMPLES_INCLUDE_SERVER=0`
- `EXAMPLES_INCLUDE_AUDIO=0`
- `EXAMPLES_INCLUDE_EXTERNAL=0`
- Auto-approvals in auto mode: `APPLY_PATCH_AUTO_APPROVE=1`, `SHELL_AUTO_APPROVE=1`, `AUTO_APPROVE_MCP=1`
## Log locations
- Main logs: `.tmp/examples-start-logs/main_*.log`
- Per-example logs (from `run_examples.py`): `.tmp/examples-start-logs/<module_path>.log`
- Rerun list: `.tmp/examples-rerun.txt`
- Stdout logs: `.tmp/examples-start-logs/stdout_*.log`
## Notes
- The runner delegates to `uv run examples/run_examples.py`, which already writes per-example logs and supports `--collect`, `--rerun-file`, and `--print-auto-skip`.
- `start` uses `--write-rerun` so failures are captured automatically.
- If `.tmp/examples-rerun.txt` exists and is non-empty, invoking the skill with no args runs `rerun` by default.
## Behavioral validation (Codex/LLM responsibility)
The runner does not perform any automated behavioral validation. After every foreground `start` or `rerun`, **Codex must manually validate** every entry that exited with status 0:
1. Read the example source (and comments) to infer intended flow, tools used, and expected key outputs.
2. Open the matching per-example log under `.tmp/examples-start-logs/`.
3. Confirm the intended actions/results occurred; flag omissions or divergences.
4. Do this for **all passed examples**, not just a sample.
5. Report immediately after the run with concise citations to the exact log lines that justify the validation.
================================================
FILE: .agents/skills/examples-auto-run/agents/openai.yaml
================================================
# Skill metadata surfaced in the agent UI for the examples-auto-run skill.
interface:
  display_name: "Examples Auto Run"
  short_description: "Run examples in auto mode with logs and rerun helpers"
  default_prompt: "Use $examples-auto-run to run the repo examples in auto mode, collect logs, and summarize any failures."
================================================
FILE: .agents/skills/examples-auto-run/scripts/run.sh
================================================
#!/usr/bin/env bash
set -euo pipefail
# Repository root: four directory levels above this script's location.
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../../.." && pwd)"
# Pidfile tracking a detached (--background) run.
PID_FILE="$ROOT/.tmp/examples-auto-run.pid"
# Destination for main_*.log, stdout_*.log, and per-example logs.
LOG_DIR="$ROOT/.tmp/examples-start-logs"
# List of failed examples produced by --write-rerun / --collect.
RERUN_FILE="$ROOT/.tmp/examples-rerun.txt"
# Create the .tmp workspace and the log directory when they are missing.
ensure_dirs() {
  mkdir -p "$ROOT/.tmp" "$LOG_DIR"
}
# Succeed (exit 0) only when $1 is a non-empty pid of a live process.
is_running() {
  local candidate="$1"
  if [[ -z "$candidate" ]]; then
    return 1
  fi
  ps -p "$candidate" >/dev/null 2>&1
}
# Run the example suite in auto mode. With --background as the first argument,
# detach the run (pidfile + stdout log); otherwise run in the foreground and
# return the runner's real exit status.
cmd_start() {
  ensure_dirs
  local background=0
  if [[ "${1:-}" == "--background" ]]; then
    background=1
    shift
  fi
  local ts main_log stdout_log
  ts="$(date +%Y%m%d-%H%M%S)"
  main_log="$LOG_DIR/main_${ts}.log"
  stdout_log="$LOG_DIR/stdout_${ts}.log"
  # Base command; any extra CLI args from the caller are appended via "$@".
  local run_cmd=(
    uv run examples/run_examples.py
    --auto-mode
    --write-rerun
    --main-log "$main_log"
    --logs-dir "$LOG_DIR"
  )
  if [[ "$background" -eq 1 ]]; then
    # Refuse to start a second background run while one is still alive.
    if [[ -f "$PID_FILE" ]]; then
      local pid
      pid="$(cat "$PID_FILE" 2>/dev/null || true)"
      if is_running "$pid"; then
        echo "examples/run_examples.py already running (pid=$pid)."
        exit 1
      fi
    fi
    (
      # Ignore hangups so the detached run survives the parent shell exiting.
      trap '' HUP
      export EXAMPLES_INTERACTIVE_MODE="${EXAMPLES_INTERACTIVE_MODE:-auto}"
      export APPLY_PATCH_AUTO_APPROVE="${APPLY_PATCH_AUTO_APPROVE:-1}"
      export SHELL_AUTO_APPROVE="${SHELL_AUTO_APPROVE:-1}"
      export AUTO_APPROVE_MCP="${AUTO_APPROVE_MCP:-1}"
      export EXAMPLES_INCLUDE_INTERACTIVE="${EXAMPLES_INCLUDE_INTERACTIVE:-1}"
      export EXAMPLES_INCLUDE_SERVER="${EXAMPLES_INCLUDE_SERVER:-0}"
      export EXAMPLES_INCLUDE_AUDIO="${EXAMPLES_INCLUDE_AUDIO:-0}"
      export EXAMPLES_INCLUDE_EXTERNAL="${EXAMPLES_INCLUDE_EXTERNAL:-0}"
      cd "$ROOT"
      exec "${run_cmd[@]}" "$@" > >(tee "$stdout_log") 2>&1
    ) &
    local pid=$!
    echo "$pid" >"$PID_FILE"
    echo "Started run_examples.py (pid=$pid)"
    echo "Main log: $main_log"
    echo "Stdout log: $stdout_log"
    # Point at the real post-run helper: this script has no `validate`
    # subcommand (dispatcher supports start|stop|status|logs|tail|collect|rerun),
    # so the old hint referenced a command that does not exist.
    echo "Run '.agents/skills/examples-auto-run/scripts/run.sh collect \"$main_log\"' after it finishes to collect any failures."
    return 0
  fi
  export EXAMPLES_INTERACTIVE_MODE="${EXAMPLES_INTERACTIVE_MODE:-auto}"
  export APPLY_PATCH_AUTO_APPROVE="${APPLY_PATCH_AUTO_APPROVE:-1}"
  export SHELL_AUTO_APPROVE="${SHELL_AUTO_APPROVE:-1}"
  export AUTO_APPROVE_MCP="${AUTO_APPROVE_MCP:-1}"
  export EXAMPLES_INCLUDE_INTERACTIVE="${EXAMPLES_INCLUDE_INTERACTIVE:-1}"
  export EXAMPLES_INCLUDE_SERVER="${EXAMPLES_INCLUDE_SERVER:-0}"
  export EXAMPLES_INCLUDE_AUDIO="${EXAMPLES_INCLUDE_AUDIO:-0}"
  export EXAMPLES_INCLUDE_EXTERNAL="${EXAMPLES_INCLUDE_EXTERNAL:-0}"
  cd "$ROOT"
  # Disable -e around the pipeline so the runner's real exit status can be
  # captured from PIPESTATUS (tee's status would otherwise mask it).
  set +e
  "${run_cmd[@]}" "$@" 2>&1 | tee "$stdout_log"
  local run_status=${PIPESTATUS[0]}
  set -e
  return "$run_status"
}
# Stop the background run recorded in the pidfile: TERM first, escalate to
# KILL after one second, and always remove the pidfile.
cmd_stop() {
  if [[ ! -f "$PID_FILE" ]]; then
    echo "No pid file; nothing to stop."
    return 0
  fi
  local recorded_pid
  recorded_pid="$(cat "$PID_FILE" 2>/dev/null || true)"
  if [[ -z "$recorded_pid" ]]; then
    rm -f "$PID_FILE"
    echo "Pid file empty; cleaned."
    return 0
  fi
  if ! is_running "$recorded_pid"; then
    rm -f "$PID_FILE"
    echo "Process $recorded_pid not running; cleaned pid file."
    return 0
  fi
  echo "Stopping pid $recorded_pid ..."
  kill "$recorded_pid" 2>/dev/null || true
  sleep 1
  if is_running "$recorded_pid"; then
    echo "Sending SIGKILL to $recorded_pid ..."
    kill -9 "$recorded_pid" 2>/dev/null || true
  fi
  rm -f "$PID_FILE"
  echo "Stopped."
}
# Report whether a background run recorded in the pidfile is still alive.
cmd_status() {
  local recorded_pid=""
  if [[ -f "$PID_FILE" ]]; then
    recorded_pid="$(cat "$PID_FILE" 2>/dev/null || true)"
  fi
  if is_running "$recorded_pid"; then
    echo "Running (pid=$recorded_pid)"
  else
    echo "Not running."
  fi
}
# List log files one per line, newest first.
cmd_logs() {
  ensure_dirs
  ls -1t "$LOG_DIR"
}
# Follow a log file with tail -f; defaults to the most recently modified one.
cmd_tail() {
  ensure_dirs
  local target
  # Fall back to the newest entry in the log directory when no name is given.
  target="${1:-$(ls -1t "$LOG_DIR" | head -n1)}"
  if [[ -z "$target" ]]; then
    echo "No log files yet."
    exit 1
  fi
  tail -f "$LOG_DIR/$target"
}
# Parse a main log for failed examples and write them to $RERUN_FILE.
# Defaults to the newest main_*.log when no log file argument is given.
collect_rerun() {
  ensure_dirs
  local log_file="${1:-}"
  if [[ -z "$log_file" ]]; then
    # `|| true` is required: when no main_*.log exists the glob stays literal,
    # `ls` exits non-zero, and under `set -euo pipefail` the whole script would
    # abort here — before the friendly diagnostic below could ever run.
    log_file="$(ls -1t "$LOG_DIR"/main_*.log 2>/dev/null | head -n1 || true)"
  fi
  if [[ -z "$log_file" ]] || [[ ! -f "$log_file" ]]; then
    echo "No main log file found."
    exit 1
  fi
  cd "$ROOT"
  uv run examples/run_examples.py --collect "$log_file" --output "$RERUN_FILE"
}
# Re-run only the examples listed in a rerun file (defaults to $RERUN_FILE)
# in auto mode, writing a fresh main log and refreshing the rerun list.
cmd_rerun() {
  ensure_dirs
  local file="${1:-$RERUN_FILE}"
  # Nothing to do when the rerun list is missing or empty (-s: exists and non-empty).
  if [[ ! -s "$file" ]]; then
    echo "Rerun list is empty: $file"
    exit 0
  fi
  local ts main_log stdout_log
  ts="$(date +%Y%m%d-%H%M%S)"
  main_log="$LOG_DIR/main_${ts}.log"
  stdout_log="$LOG_DIR/stdout_${ts}.log"
  cd "$ROOT"
  # Auto mode with auto-approvals by default; callers may override via env.
  export EXAMPLES_INTERACTIVE_MODE="${EXAMPLES_INTERACTIVE_MODE:-auto}"
  export APPLY_PATCH_AUTO_APPROVE="${APPLY_PATCH_AUTO_APPROVE:-1}"
  export SHELL_AUTO_APPROVE="${SHELL_AUTO_APPROVE:-1}"
  export AUTO_APPROVE_MCP="${AUTO_APPROVE_MCP:-1}"
  # Disable -e around the pipeline so the runner's real exit status can be
  # captured from PIPESTATUS (tee's status would otherwise mask it).
  set +e
  uv run examples/run_examples.py --auto-mode --rerun-file "$file" --write-rerun --main-log "$main_log" --logs-dir "$LOG_DIR" 2>&1 | tee "$stdout_log"
  local run_status=${PIPESTATUS[0]}
  set -e
  return "$run_status"
}
# Print CLI help. The quoted heredoc delimiter ('EOF') prevents any
# parameter or command expansion inside the help text.
usage() {
  cat <<'EOF'
Usage: run.sh <start|stop|status|logs|tail|collect|rerun> [args...]
Commands:
start [--filter ... | other args] Run examples in auto mode (foreground). Pass --background to run detached.
stop Kill the running auto-run (if any).
status Show whether it is running.
logs List log files (.tmp/examples-start-logs).
tail [logfile] Tail the latest (or specified) log.
collect [main_log] Parse a main log and write failed examples to .tmp/examples-rerun.txt.
rerun [rerun_file] Run only the examples listed in .tmp/examples-rerun.txt.
Environment overrides:
EXAMPLES_INTERACTIVE_MODE (default auto)
EXAMPLES_INCLUDE_SERVER/INTERACTIVE/AUDIO/EXTERNAL (defaults: 0/1/0/0)
APPLY_PATCH_AUTO_APPROVE, SHELL_AUTO_APPROVE, AUTO_APPROVE_MCP (default 1 in auto mode)
EOF
}
# Default command: a plain invocation reruns outstanding failures when a
# non-empty rerun list exists; otherwise it starts a fresh run.
default_cmd="start"
if [[ $# -eq 0 && -s "$RERUN_FILE" ]]; then
  default_cmd="rerun"
fi
# Every branch guards `shift` with `|| true`: when the default command is used
# with zero arguments, a bare failing `shift` would abort under `set -e`.
# The `tail` branch previously lacked the guard; added for consistency.
case "${1:-$default_cmd}" in
  start) shift || true; cmd_start "$@" ;;
  stop) shift || true; cmd_stop ;;
  status) shift || true; cmd_status ;;
  logs) shift || true; cmd_logs ;;
  tail) shift || true; cmd_tail "${1:-}" ;;
  collect) shift || true; collect_rerun "${1:-}" ;;
  rerun) shift || true; cmd_rerun "${1:-}" ;;
  *) usage; exit 1 ;;
esac
================================================
FILE: .agents/skills/final-release-review/SKILL.md
================================================
---
name: final-release-review
description: Perform a release-readiness review by locating the previous release tag from remote tags and auditing the diff (e.g., v1.2.3...<commit>) for breaking changes, regressions, improvement opportunities, and risks before releasing openai-agents-python.
---
# Final Release Review
## Purpose
Use this skill when validating the latest release candidate commit (default tip of `origin/main`) for release. It guides you to fetch remote tags, pick the previous release tag, and thoroughly inspect the `BASE_TAG...TARGET` diff for breaking changes, introduced bugs/regressions, improvement opportunities, and release risks.
The review must be stable and actionable: avoid variance between runs by using explicit gate rules, and never produce a `BLOCKED` call without concrete evidence and clear unblock actions.
## Quick start
1. Ensure repository root: `pwd` → `path-to-workspace/openai-agents-python`.
2. Sync tags and pick base (default `v*`):
```bash
BASE_TAG="$(.agents/skills/final-release-review/scripts/find_latest_release_tag.sh origin 'v*')"
```
3. Choose target commit (default tip of `origin/main`, ensure fresh): `git fetch origin main --prune` then `TARGET="$(git rev-parse origin/main)"`.
4. Snapshot scope:
```bash
git diff --stat "${BASE_TAG}"..."${TARGET}"
git diff --dirstat=files,0 "${BASE_TAG}"..."${TARGET}"
git log --oneline --reverse "${BASE_TAG}".."${TARGET}"
git diff --name-status "${BASE_TAG}"..."${TARGET}"
```
5. Deep review using `references/review-checklist.md` to spot breaking changes, regressions, and improvement chances.
6. Capture findings and call the release gate: ship/block with conditions; propose focused tests for risky areas.
## Deterministic gate policy
- Default to **🟢 GREEN LIGHT TO SHIP** unless at least one blocking trigger below is satisfied.
- Use **🔴 BLOCKED** only when you can cite concrete release-blocking evidence and provide actionable unblock steps.
- Blocking triggers (at least one required for `BLOCKED`):
- A confirmed regression or bug introduced in `BASE...TARGET` (for example, failing targeted test, incompatible behavior in diff, or removed behavior without fallback).
- A confirmed breaking public API/protocol/config change with missing or mismatched versioning and no migration path (for example, patch release for a breaking change).
- A concrete data-loss, corruption, or security-impacting change with unresolved mitigation.
- A release-critical packaging/build/runtime path is broken by the diff (not speculative).
- Non-blocking by itself:
- Large diff size, broad refactor, or many touched files.
- "Could regress" risk statements without concrete evidence.
- Not running tests locally.
- If evidence is incomplete, issue **🟢 GREEN LIGHT TO SHIP** with targeted validation follow-ups instead of `BLOCKED`.
## Workflow
- **Prepare**
- Run the quick-start tag command to ensure you use the latest remote tag. If the tag pattern differs, override the pattern argument (e.g., `'*.*.*'`).
- If the user specifies a base tag, prefer it but still fetch remote tags first.
- Keep the working tree clean to avoid diff noise.
- **Assumptions**
- Assume the target commit (default `origin/main` tip) has already passed `$code-change-verification` in CI unless the user says otherwise.
- Do not block a release solely because you did not run tests locally; focus on concrete behavioral or API risks.
- Release policy: routine releases use patch versions; use minor only for breaking changes or major feature additions. Major versions are reserved until the 1.0 release.
- **Map the diff**
- Use `--stat`, `--dirstat`, and `--name-status` outputs to spot hot directories and file types.
- For suspicious files, prefer `git diff --word-diff BASE...TARGET -- <path>`.
- Note any deleted or newly added tests, config, migrations, or scripts.
- **Analyze risk**
- Walk through the categories in `references/review-checklist.md` (breaking changes, regression clues, improvement opportunities).
- When you suspect a risk, cite the specific file/commit and explain the behavioral impact.
- For every finding, include all of: `Evidence`, `Impact`, and `Action`.
- Severity calibration:
- **🟢 LOW**: low blast radius or clearly covered behavior; no release gate impact.
- **🟡 MODERATE**: plausible user-facing regression signal; needs validation but not a confirmed blocker.
- **🔴 HIGH**: confirmed or strongly evidenced release-blocking issue.
- Suggest minimal, high-signal validation commands (targeted tests or linters) instead of generic reruns when time is tight.
- Breaking changes do not automatically require a BLOCKED release call when they are already covered by an appropriate version bump and migration/upgrade notes; only block when the bump is missing/mismatched (e.g., patch bump) or when the breaking change introduces unresolved risk.
- **Form a recommendation**
- State BASE_TAG and TARGET explicitly.
- Provide a concise diff summary (key directories/files and counts).
- List: breaking-change candidates, probable regressions/bugs, improvement opportunities, missing release notes/migrations.
- Recommend ship/block and the exact checks needed to unblock if blocking. If a breaking change is properly versioned (minor/major), you may still recommend a GREEN LIGHT TO SHIP while calling out the change. Use emoji and boldface in the release call to make the gate obvious.
- If you cannot provide a concrete unblock checklist item, do not use `BLOCKED`.
## Output format (required)
All output must be in English.
Use the following report structure in every response produced by this skill. Be proactive and decisive: make a clear ship/block call near the top, and assign an explicit risk level (LOW/MODERATE/HIGH) to each finding with a short impact statement. Avoid overly cautious hedging when the risk is low and tests passed.
Always use the fixed repository URL in the Diff section (`https://github.com/openai/openai-agents-python/compare/...`). Do not use `${GITHUB_REPOSITORY}` or any other template variable. Format risk levels as bold emoji labels: **🟢 LOW**, **🟡 MODERATE**, **🔴 HIGH**.
Every risk finding must contain an actionable next step. If the report uses `**🔴 BLOCKED**`, include an `Unblock checklist` section with at least one concrete command/task and a pass condition.
```
### Release readiness review (<tag> -> TARGET <ref>)
This is a release readiness report done by `$final-release-review` skill.
### Diff
https://github.com/openai/openai-agents-python/compare/<tag>...<target-commit>
### Release call:
**<🟢 GREEN LIGHT TO SHIP | 🔴 BLOCKED>** <one-line rationale>
### Scope summary:
- <N files changed (+A/-D); key areas touched: ...>
### Risk assessment (ordered by impact):
1) **<Finding title>**
- Risk: **<🟢 LOW | 🟡 MODERATE | 🔴 HIGH>**. <Impact statement in one sentence.>
- Evidence: <specific diff/test/commit signal; avoid generic statements>
- Files: <path(s)>
- Action: <concrete next step command/task with pass criteria>
2) ...
### Unblock checklist (required when Release call is BLOCKED):
1. [ ] <concrete check/fix>
- Exit criteria: <what must be true to unblock>
2. ...
### Notes:
- <working tree status, tag/target assumptions, or re-run guidance>
```
If no risks are found, include a “No material risks identified” line under Risk assessment and still provide a ship call. If you did not run local verification, do not add a verification status section or use it as a release blocker; note any assumptions briefly in Notes.
If the report is not blocked, omit the `Unblock checklist` section.
### Resources
- `scripts/find_latest_release_tag.sh`: Fetches remote tags and returns the newest tag matching a pattern (default `v*`).
- `references/review-checklist.md`: Detailed signals and commands for spotting breaking changes, regressions, and release polish gaps.
================================================
FILE: .agents/skills/final-release-review/agents/openai.yaml
================================================
# Skill metadata surfaced in the agent UI for the final-release-review skill.
interface:
  display_name: "Final Release Review"
  short_description: "Audit a release candidate against the previous tag"
  default_prompt: "Use $final-release-review to audit the release candidate diff against the previous release tag and call the ship/block gate."
================================================
FILE: .agents/skills/final-release-review/references/review-checklist.md
================================================
# Release Diff Review Checklist
## Quick commands
- Sync tags: `git fetch origin --tags --prune`.
- Identify latest release tag (default pattern `v*`): `git tag -l 'v*' --sort=-v:refname | head -n1` or use `.agents/skills/final-release-review/scripts/find_latest_release_tag.sh`.
- Generate overview: `git diff --stat BASE...TARGET`, `git diff --dirstat=files,0 BASE...TARGET`, `git log --oneline --reverse BASE..TARGET`.
- Inspect risky files quickly: `git diff --name-status BASE...TARGET`, `git diff --word-diff BASE...TARGET -- <path>`.
## Gate decision matrix
- Choose `🟢 GREEN LIGHT TO SHIP` when no concrete blocking trigger is found.
- Choose `🔴 BLOCKED` only when at least one blocking trigger has concrete evidence and a defined unblock action.
- Blocking triggers:
- Confirmed regression/bug introduced in the diff.
- Confirmed breaking public API/protocol/config change with missing or mismatched versioning/migration path.
- Concrete data-loss/corruption/security-impacting issue with unresolved mitigation.
- Release-critical build/package/runtime break introduced by the diff.
- Non-blocking by itself:
- Large refactor or high file count.
- Speculative risk without evidence.
- Not running tests locally.
- If uncertain, keep gate green and provide focused follow-up checks.
## Actionability contract
- Every risk finding should include:
- `Evidence`: specific file/commit/diff/test signal.
- `Impact`: one-sentence user or runtime effect.
- `Action`: concrete command/task with pass criteria.
- A `BLOCKED` report must contain an `Unblock checklist` with at least one executable item.
- If no executable unblock item exists, do not block; downgrade to green with follow-up checks.
## Breaking change signals
- Public API surface: removed/renamed modules, classes, functions, or re-exports; changed parameters/return types, default values changed, new required options, stricter validation.
- Protocol/schema: request/response fields added/removed/renamed, enum changes, JSON shape changes, ID formats, pagination defaults.
- Config/CLI/env: renamed flags, default behavior flips, removed fallbacks, environment variable changes, logging levels tightened.
- Dependencies/platform: Python version requirement changes, dependency major bumps, `pyproject.toml`/`uv.lock` changes, removed or renamed extras.
- Persistence/data: migration scripts missing, data model changes, stored file formats, cache keys altered without invalidation.
- Docs/examples drift: examples still reflect old behavior or lack migration note.
## Regression risk clues
- Large refactors with light test deltas or deleted tests; new `skip`/`todo` markers.
- Concurrency/timing: new async flows, asyncio event-loop changes, retries, timeouts, debounce/caching changes, race-prone patterns.
- Error handling: catch blocks removed, swallowed errors, broader catch-all added without logging, stricter throws without caller updates.
- Stateful components: mutable shared state, global singletons, lifecycle changes (init/teardown), resource cleanup removal.
- Third-party changes: swapped core libraries, feature flags toggled, observability removed or gated.
## Improvement opportunities
- Missing coverage for new code paths; add focused tests.
- Performance: obvious N+1 loops, repeated I/O without caching, excessive serialization.
- Developer ergonomics: unclear naming, missing inline docs for public APIs, missing examples for new features.
- Release hygiene: add migration/upgrade note when behavior changes; ensure changelog/notes capture user-facing shifts.
## Evidence to capture in the review output
- BASE tag and TARGET ref used for the diff; confirm tags fetched.
- High-level diff stats and key directories touched.
- Concrete files/commits that indicate breaking changes or risk, with brief rationale.
- Tests or commands suggested to validate suspected risks (include pass criteria).
- Explicit release gate call (ship/block) with conditions to unblock.
- `Unblock checklist` section when (and only when) gate is `BLOCKED`.
================================================
FILE: .agents/skills/final-release-review/scripts/find_latest_release_tag.sh
================================================
#!/usr/bin/env bash
# Print the newest tag matching a pattern (default 'v*') after syncing tags
# from a remote (default 'origin'). Exits 1 with a diagnostic when no tag matches.
set -euo pipefail

remote="${1:-origin}"
pattern="${2:-v*}"

# Sync tags from the remote to ensure the latest release tag is available locally.
git fetch "$remote" --tags --prune --quiet

# `|| true` guards the pipeline: `head -n1` closes the pipe after one line, so
# with a long tag list `git tag` can die from SIGPIPE (exit 141); under
# `set -euo pipefail` that would abort the script before the "No tags found"
# diagnostic below could run.
latest_tag=$(git tag -l "$pattern" --sort=-v:refname | head -n1 || true)

if [[ -z "$latest_tag" ]]; then
  echo "No tags found matching pattern '$pattern' after fetching from $remote." >&2
  exit 1
fi
echo "$latest_tag"
================================================
FILE: .agents/skills/implementation-strategy/SKILL.md
================================================
---
name: implementation-strategy
description: Decide how to implement runtime and API changes in openai-agents-python before editing code. Use when a task changes exported APIs, runtime behavior, serialized state, tests, or docs and you need to choose the compatibility boundary, whether shims or migrations are warranted, and when unreleased interfaces can be rewritten directly.
---
# Implementation Strategy
## Overview
Use this skill before editing code when the task changes runtime behavior or anything that might look like a compatibility concern. The goal is to keep implementations simple while protecting real released contracts.
## Quick start
1. Identify the surface you are changing: released public API, unreleased branch-local API, internal helper, persisted schema, wire protocol, CLI/config/env surface, or docs/examples only.
2. Determine the latest release boundary from `origin` first, and only fall back to local tags when remote tags are unavailable:
```bash
BASE_TAG="$(.agents/skills/final-release-review/scripts/find_latest_release_tag.sh origin 'v*' 2>/dev/null || git tag -l 'v*' --sort=-v:refname | head -n1)"
echo "$BASE_TAG"
```
3. Judge breaking-change risk against that latest release tag, not against unreleased branch churn or post-tag changes already on `main`. If the command fell back to local tags, treat the result as potentially stale and say so.
4. Prefer the simplest implementation that satisfies the current task. Update callers, tests, docs, and examples directly instead of preserving superseded unreleased interfaces.
5. Add a compatibility layer only when there is a concrete released consumer, an otherwise supported durable external state boundary that requires it, or when the user explicitly asks for a migration path.
## Compatibility boundary rules
- Released public API or documented external behavior: preserve compatibility or provide an explicit migration path.
- Persisted schema, serialized state, wire protocol, CLI flags, environment variables, and externally consumed config: treat as compatibility-sensitive when they are part of the latest release or when the repo explicitly intends to preserve them across commits, processes, or machines.
- Python-specific durable surfaces such as `RunState`, session persistence, exported dataclass constructor order, and documented model/provider configuration should be treated as compatibility-sensitive when they were part of the latest release tag or are explicitly supported as a shared durability boundary.
- Interface changes introduced only on the current branch: not a compatibility target. Rewrite them directly.
- Interface changes present on `main` but added after the latest release tag: not a semver breaking change by themselves. Rewrite them directly unless they already define a released or explicitly supported durable external state boundary.
- Internal helpers, private types, same-branch tests, fixtures, and examples: update them directly instead of adding adapters.
- Unreleased persisted schema versions on `main` may be renumbered or squashed before release when intermediate snapshots are intentionally unsupported. When you do that, update the support set and tests together so the boundary is explicit.
## Default implementation stance
- Prefer deletion or replacement over aliases, overloads, shims, feature flags, and dual-write logic when the old shape is unreleased.
- Do not preserve a confusing abstraction just because it exists in the current branch diff.
- If review feedback claims a change is breaking, verify it against the latest release tag and actual external impact before accepting the feedback.
- If a change truly crosses the latest released contract boundary, call that out explicitly in the ExecPlan, release notes context, and user-facing summary.
## When to stop and confirm
- The change would alter behavior shipped in the latest release tag.
- The change would modify durable external data, protocol formats, or serialized state.
- The user explicitly asked for backward compatibility, deprecation, or migration support.
## Output expectations
When this skill materially affects the implementation approach, state the decision briefly in your reasoning or handoff, for example:
- `Compatibility boundary: latest release tag v0.x.y; branch-local interface rewrite, no shim needed.`
- `Compatibility boundary: released RunState schema; preserve compatibility and add migration coverage.`
================================================
FILE: .agents/skills/implementation-strategy/agents/openai.yaml
================================================
interface:
display_name: "Implementation Strategy"
short_description: "Choose a compatibility-aware implementation plan"
default_prompt: "Use $implementation-strategy to choose the implementation approach and compatibility boundary before editing runtime code."
================================================
FILE: .agents/skills/openai-knowledge/SKILL.md
================================================
---
name: openai-knowledge
description: Use when working with the OpenAI API (Responses API) or OpenAI platform features (tools, streaming, Realtime API, auth, models, rate limits, MCP) and you need authoritative, up-to-date documentation (schemas, examples, limits, edge cases). Prefer the OpenAI Developer Documentation MCP server tools when available; otherwise guide the user to enable `openaiDeveloperDocs`.
---
# OpenAI Knowledge
## Overview
Use the OpenAI Developer Documentation MCP server to search and fetch exact docs (markdown), then base your answer on that text instead of guessing.
## Workflow
### 1) Check whether the Docs MCP server is available
If the `mcp__openaiDeveloperDocs__*` tools are available, use them.
If you are unsure, run `codex mcp list` and check for `openaiDeveloperDocs`.
### 2) Use MCP tools to pull exact docs
- Search first, then fetch the specific page or pages.
- `mcp__openaiDeveloperDocs__search_openai_docs` → pick the best URL.
- `mcp__openaiDeveloperDocs__fetch_openai_doc` → retrieve the exact markdown (optionally with an `anchor`).
- When you need endpoint schemas or parameters, use:
- `mcp__openaiDeveloperDocs__get_openapi_spec`
- `mcp__openaiDeveloperDocs__list_api_endpoints`
Base your answer on the fetched text and quote or paraphrase it precisely. Do not invent flags, field names, defaults, or limits.
### 3) If MCP is not configured, guide setup (do not change config unless asked)
Provide one of these setup options, then ask the user to restart the Codex session so the tools load:
- CLI:
- `codex mcp add openaiDeveloperDocs --url https://developers.openai.com/mcp`
- Config file (`~/.codex/config.toml`):
- Add:
```toml
[mcp_servers.openaiDeveloperDocs]
url = "https://developers.openai.com/mcp"
```
Also point to: https://developers.openai.com/resources/docs-mcp#quickstart
================================================
FILE: .agents/skills/openai-knowledge/agents/openai.yaml
================================================
interface:
display_name: "OpenAI Knowledge"
short_description: "Pull authoritative OpenAI platform documentation"
default_prompt: "Use $openai-knowledge to fetch the exact OpenAI docs needed for this API or platform question."
================================================
FILE: .agents/skills/pr-draft-summary/SKILL.md
================================================
---
name: pr-draft-summary
description: Create a PR title and draft description after substantive code changes are finished. Trigger when wrapping up a moderate-or-larger change (runtime code, tests, build config, docs with behavior impact) and you need the PR-ready summary block with change summary plus PR draft text.
---
# PR Draft Summary
## Purpose
Produce the PR-ready summary required in this repository after substantive code work is complete: a concise summary plus a PR-ready title and draft description that begins with "This pull request <verb> ...". The block should be ready to paste into a PR for openai-agents-python.
## When to Trigger
- The task for this repo is finished (or ready for review) and it touched runtime code, tests, examples, docs with behavior impact, or build/test configuration.
- You are about to send the "work complete" response and need the PR block included.
- Skip only for trivial or conversation-only tasks where no PR-style summary is expected.
## Inputs to Collect Automatically (do not ask the user)
- Current branch: `git rev-parse --abbrev-ref HEAD`.
- Working tree: `git status -sb`.
- Untracked files: `git ls-files --others --exclude-standard` (use with `git status -sb` to ensure they are surfaced; `--stat` does not include them).
- Changed files: `git diff --name-only` (unstaged) and `git diff --name-only --cached` (staged); sizes via `git diff --stat` and `git diff --stat --cached`.
- Latest release tag (prefer remote-aware lookup): `LATEST_RELEASE_TAG=$(.agents/skills/final-release-review/scripts/find_latest_release_tag.sh origin 'v*' 2>/dev/null || git tag -l 'v*' --sort=-v:refname | head -n1)`.
- Base reference (use the branch's upstream, fallback to `origin/main`):
- `BASE_REF=$(git rev-parse --abbrev-ref --symbolic-full-name @{upstream} 2>/dev/null || echo origin/main)`.
- `BASE_COMMIT=$(git merge-base --fork-point "$BASE_REF" HEAD || git merge-base "$BASE_REF" HEAD || echo "$BASE_REF")`.
- Commits ahead of the base fork point: `git log --oneline --no-merges ${BASE_COMMIT}..HEAD`.
- Category signals for this repo: runtime (`src/agents/`), tests (`tests/`), examples (`examples/`), docs (`docs/`, `mkdocs.yml`), build/test config (`pyproject.toml`, `uv.lock`, `Makefile`, `.github/`).
## Workflow
1) Run the commands above without asking the user; compute `BASE_REF`/`BASE_COMMIT` first so later commands reuse them.
2) If there are no staged/unstaged/untracked changes and no commits ahead of `${BASE_COMMIT}`, reply briefly that no code changes were detected and skip emitting the PR block.
3) Infer change type from the touched paths listed under "Category signals"; classify as feature, fix, refactor, or docs-with-impact, and flag backward-compatibility risk only when the diff changes released public APIs, external config, persisted data, serialized state, or wire protocols. Judge that risk against `LATEST_RELEASE_TAG`, not unreleased branch-only churn.
4) Summarize changes in 1–3 short sentences using the key paths (top 5) and `git diff --stat` output; explicitly call out untracked files from `git status -sb`/`git ls-files --others --exclude-standard` because `--stat` does not include them. If the working tree is clean but there are commits ahead of `${BASE_COMMIT}`, summarize using those commit messages.
5) Choose the lead verb for the description: feature → `adds`, bug fix → `fixes`, refactor/perf → `improves` or `updates`, docs-only → `updates`.
6) Suggest a branch name. If already off main, keep it; otherwise propose `feat/<slug>`, `fix/<slug>`, or `docs/<slug>` based on the primary area (e.g., `docs/pr-draft-summary-guidance`).
7) If the current branch matches `issue-<number>` (digits only), keep that branch suggestion. Optionally pull light issue context (for example via the GitHub API) when available, but do not block or retry if it is not. When an issue number is present, reference `https://github.com/openai/openai-agents-python/issues/<number>` and include an auto-closing line such as `This pull request resolves #<number>.`.
8) Draft the PR title and description using the template below.
9) Output only the block in "Output Format". Keep any surrounding status note minimal and in English.
## Output Format
When closing out a task and the summary block is desired, add this concise Markdown block (English only) after any brief status note. If the user says they do not want it, skip this section.
```
# Pull Request Draft
## Branch name suggestion
git checkout -b <kebab-case suggestion, e.g., feat/pr-draft-summary-skill>
## Title
<single-line imperative title, usable as a commit message; include a conventional prefix such as `chore:` or `feat:` when one applies>
## Description
<include what you changed plus a draft pull request title and description for your local changes; start the description with prose such as "This pull request resolves/updates/adds ..." using a verb that matches the change (you can use bullets later), explain the change background (for bugs, clearly describe the bug, symptoms, or repro; for features, what is needed and why), any behavior changes or considerations to be aware of, and you do not need to mention tests you ran.>
```
Keep it tight—no redundant prose around the block, and avoid repeating details between `Changes` and the description. Tests do not need to be listed unless specifically requested.
================================================
FILE: .agents/skills/pr-draft-summary/agents/openai.yaml
================================================
interface:
display_name: "PR Draft Summary"
short_description: "Draft the repo-ready PR title and description"
default_prompt: "Use $pr-draft-summary to generate the PR-ready summary block, title, and draft description for the current changes."
================================================
FILE: .agents/skills/test-coverage-improver/SKILL.md
================================================
---
name: test-coverage-improver
description: 'Improve test coverage in the OpenAI Agents Python repository: run `make coverage`, inspect coverage artifacts, identify low-coverage files, propose high-impact tests, and confirm with the user before writing tests.'
---
# Test Coverage Improver
## Overview
Use this skill whenever coverage needs assessment or improvement (coverage regressions, failing thresholds, or user requests for stronger tests). It runs the coverage suite, analyzes results, highlights the biggest gaps, and prepares test additions while confirming with the user before changing code.
## Quick Start
1. From the repo root run `make coverage` to regenerate `.coverage` data and `coverage.xml`.
2. Collect artifacts: `.coverage` and `coverage.xml`, plus the console output from `coverage report -m` for drill-downs.
3. Summarize coverage: total percentages, lowest files, and uncovered lines/paths.
4. Draft test ideas per file: scenario, behavior under test, expected outcome, and likely coverage gain.
5. Ask the user for approval to implement the proposed tests; pause until they agree.
6. After approval, write the tests in `tests/`, rerun `make coverage`, and then run `$code-change-verification` before marking work complete.
## Workflow Details
- **Run coverage**: Execute `make coverage` at repo root. Avoid watch flags and keep prior coverage artifacts only if comparing trends.
- **Parse summaries efficiently**:
- Prefer the console output from `coverage report -m` for file-level totals; fallback to `coverage.xml` for tooling or spreadsheets.
- Use `uv run coverage html` to generate `htmlcov/index.html` if you need an interactive drill-down.
- **Prioritize targets**:
- Public APIs or shared utilities in `src/agents/` before examples or docs.
- Files with low statement coverage or newly added code at 0%.
- Recent bug fixes or risky code paths (error handling, retries, timeouts, concurrency).
- **Design impactful tests**:
- Hit uncovered paths: error cases, boundary inputs, optional flags, and cancellation/timeouts.
- Cover combinational logic rather than trivial happy paths.
- Place tests under `tests/` and avoid flaky async timing.
- **Coordinate with the user**: Present a numbered, concise list of proposed test additions and expected coverage gains. Ask explicitly before editing code or fixtures.
- **After implementation**: Rerun coverage, report the updated summary, and note any remaining low-coverage areas.
## Notes
- Keep any added comments or code in English.
- Do not create `scripts/`, `references/`, or `assets/` unless needed later.
- If coverage artifacts are missing or stale, rerun `make coverage` instead of guessing.
================================================
FILE: .agents/skills/test-coverage-improver/agents/openai.yaml
================================================
interface:
display_name: "Test Coverage Improver"
short_description: "Analyze coverage gaps and propose high-impact tests"
default_prompt: "Use $test-coverage-improver to analyze coverage gaps, propose high-impact tests, and update coverage after approval."
================================================
FILE: .github/ISSUE_TEMPLATE/bug_report.md
================================================
---
name: Bug report
about: Report a bug
title: ''
labels: bug
assignees: ''
---
### Please read this first
- **Have you read the docs?** [Agents SDK docs](https://openai.github.io/openai-agents-python/)
- **Have you searched for related issues?** Others may have faced similar issues.
### Describe the bug
A clear and concise description of what the bug is.
### Debug information
- Agents SDK version: (e.g. `v0.0.3`)
- Python version (e.g. Python 3.10)
### Repro steps
Ideally provide a minimal python script that can be run to reproduce the bug.
### Expected behavior
A clear and concise description of what you expected to happen.
================================================
FILE: .github/ISSUE_TEMPLATE/feature_request.md
================================================
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''
---
### Please read this first
- **Have you read the docs?** [Agents SDK docs](https://openai.github.io/openai-agents-python/)
- **Have you searched for related issues?** Others may have had similar requests
### Describe the feature
What is the feature you're requesting? How would it work? Please provide examples and details if possible.
================================================
FILE: .github/ISSUE_TEMPLATE/model_provider.md
================================================
---
name: Custom model providers
about: Questions or bugs about using non-OpenAI models
title: ''
labels: bug
assignees: ''
---
### Please read this first
- **Have you read the custom model provider docs, including the 'Common issues' section?** [Model provider docs](https://openai.github.io/openai-agents-python/models/#using-other-llm-providers)
- **Have you searched for related issues?** Others may have faced similar issues.
### Describe the question
A clear and concise description of what the question or bug is.
### Debug information
- Agents SDK version: (e.g. `v0.0.3`)
- Python version (e.g. Python 3.10)
### Repro steps
Ideally provide a minimal python script that can be run to reproduce the issue.
### Expected behavior
A clear and concise description of what you expected to happen.
================================================
FILE: .github/ISSUE_TEMPLATE/question.md
================================================
---
name: Question
about: Questions about the SDK
title: ''
labels: question
assignees: ''
---
### Please read this first
- **Have you read the docs?** [Agents SDK docs](https://openai.github.io/openai-agents-python/)
- **Have you searched for related issues?** Others may have had similar requests
### Question
Describe your question. Provide details if available.
================================================
FILE: .github/PULL_REQUEST_TEMPLATE/pull_request_template.md
================================================
### Summary
<!-- Please give a short summary of the change and the problem this solves. -->
### Test plan
<!-- Please explain how this was tested -->
### Issue number
<!-- For example: "Closes #1234" -->
### Checks
- [ ] I've added new tests (if relevant)
- [ ] I've added/updated the relevant documentation
- [ ] I've run `make lint` and `make format`
- [ ] I've made sure tests pass
================================================
FILE: .github/codex/prompts/pr-labels.md
================================================
# PR auto-labeling
You are Codex running in CI to propose labels for a pull request in the openai-agents-python repository.
Inputs:
- PR context: .tmp/pr-labels/pr-context.json
- PR diff: .tmp/pr-labels/changes.diff
- Changed files: .tmp/pr-labels/changed-files.txt
Task:
- Inspect the PR context, diff, and changed files.
- Output JSON with a single top-level key: "labels" (array of strings).
- Only use labels from the allowed list.
- Prefer false negatives over false positives. If you are unsure, leave the label out.
- Return the smallest accurate set of labels for the PR's primary intent and primary surface area.
Allowed labels:
- documentation
- project
- bug
- enhancement
- dependencies
- feature:chat-completions
- feature:core
- feature:lite-llm
- feature:mcp
- feature:realtime
- feature:sessions
- feature:tracing
- feature:voice
Important guidance:
- `documentation`, `project`, and `dependencies` are also derived deterministically elsewhere in the workflow. You may include them when the evidence is explicit, but do not stretch to infer them from weak signals.
- Use direct evidence from changed implementation files and the dominant intent of the diff. Do not add labels based only on tests, examples, comments, docstrings, imports, type plumbing, or shared helpers.
- Cross-cutting features often touch many adapters and support layers. Only add a `feature:*` label when that area is itself a primary user-facing surface of the PR, not when it receives incidental compatibility or parity updates.
- Mentions of a feature area in helper names, comments, tests, or trace metadata are not enough by themselves.
- Prefer the most general accurate feature label over a larger set of narrower labels. For broad runtime work, this usually means `feature:core`.
- A secondary `feature:*` label needs two things: a non-test implementation/docs change in that area, and evidence that the area is a user-facing outcome of the PR rather than support work for another feature.
Label rules:
- documentation: Documentation changes (docs/), or src/ changes that only modify comments/docstrings without behavior changes. If only comments/docstrings change in src/, do not add bug/enhancement.
- project: Any change to pyproject.toml.
- dependencies: Dependencies are added/removed/updated (pyproject.toml dependency sections or uv.lock changes).
- bug: The PR's primary intent is to correct existing incorrect behavior. Use only with strong evidence such as the title/body/tests clearly describing a fix, regression, crash, incorrect output, or restore/preserve behavior. Do not add `bug` for incidental hardening that accompanies a new feature.
- enhancement: The PR's primary intent is to add or expand functionality. Prefer `enhancement` for feature work even if the diff also contains some fixes or guardrails needed to support that feature.
- bug vs enhancement: Prefer exactly one of these. Include both only when the PR clearly contains two separate substantial changes and both are first-order outcomes.
- feature:chat-completions: Chat Completions support or conversion is a primary deliverable of the PR. Do not add it for a small compatibility guard or parity update in `chatcmpl_converter.py`.
- feature:core: Core agent loop, tool calls, run pipeline, or other central runtime behavior is a primary surface of the PR. For cross-cutting runtime changes, this is usually the single best feature label.
- feature:lite-llm: LiteLLM adapter/provider behavior is a primary deliverable of the PR.
- feature:mcp: MCP-specific behavior or APIs are a primary deliverable of the PR. Do not add it for incidental hosted/deferred tool plumbing touched by broader runtime work.
- feature:realtime: Realtime-specific behavior, API shape, or session semantics are a primary deliverable of the PR. Do not add it for small parity updates in realtime adapters.
- feature:sessions: Session or memory behavior is a primary deliverable of the PR. Do not add it for persistence updates that merely support a broader feature.
- feature:tracing: Tracing is a primary deliverable of the PR. Do not add it for trace naming or metadata changes that accompany another feature.
- feature:voice: Voice pipeline behavior is a primary deliverable of the PR.
Decision process:
1. Determine the PR's primary intent in one sentence from the PR title/body and dominant runtime diff.
2. Start with zero labels.
3. Add `bug` or `enhancement` conservatively.
4. Add only the minimum `feature:*` labels needed to describe the primary surface area.
5. Treat extra `feature:*` labels as guilty until proven necessary. Keep them only when the PR would feel mislabeled without them.
6. Re-check every label. Drop any label that is supported only by secondary edits, parity work, or touched files outside the PR's main focus.
Examples:
- If a new cross-cutting runtime feature touches Chat Completions, Realtime, Sessions, MCP, and tracing support code for parity, prefer `["enhancement","feature:core"]` over labeling every touched area.
- If a PR mainly adds a Responses/core capability and touches realtime or sessions files only to keep shared serialization, replay, or adapters in sync, do not add `feature:realtime` or `feature:sessions`.
- If a PR mainly fixes realtime transport behavior and also updates tests/docs, prefer `["bug","feature:realtime"]`.
Output:
- JSON only (no code fences, no extra text).
- Example: {"labels":["enhancement","feature:core"]}
================================================
FILE: .github/codex/prompts/release-review.md
================================================
# Release readiness review
You are Codex running in CI. Produce a release readiness report for this repository.
Steps:
1. Determine the latest release tag (use local tags only):
- `git tag -l 'v*' --sort=-v:refname | head -n1`
2. Set TARGET to the current commit SHA: `git rev-parse HEAD`.
3. Collect diff context for BASE_TAG...TARGET:
- `git diff --stat BASE_TAG...TARGET`
- `git diff --dirstat=files,0 BASE_TAG...TARGET`
- `git diff --name-status BASE_TAG...TARGET`
- `git log --oneline --reverse BASE_TAG..TARGET`
4. Review `.agents/skills/final-release-review/references/review-checklist.md` and analyze the diff.
Output:
- Write the report in the exact format used by `$final-release-review` (see `.agents/skills/final-release-review/SKILL.md`).
- Use the compare URL: `https://github.com/${GITHUB_REPOSITORY}/compare/BASE_TAG...TARGET`.
- Include clear ship/block call and risk levels.
- If no risks are found, include "No material risks identified".
Constraints:
- Output only the report (no code fences, no extra commentary).
================================================
FILE: .github/codex/schemas/pr-labels.json
================================================
{
"type": "object",
"additionalProperties": false,
"required": ["labels"],
"properties": {
"labels": {
"type": "array",
"items": {
"type": "string",
"enum": [
"documentation",
"project",
"bug",
"enhancement",
"dependencies",
"feature:chat-completions",
"feature:core",
"feature:lite-llm",
"feature:mcp",
"feature:realtime",
"feature:sessions",
"feature:tracing",
"feature:voice"
]
}
}
}
}
================================================
FILE: .github/dependabot.yml
================================================
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "monthly"
open-pull-requests-limit: 5
labels:
- "dependencies"
================================================
FILE: .github/scripts/detect-changes.sh
================================================
#!/usr/bin/env bash
# Decide whether a CI job should run based on which files changed between two
# commits, and write `run=true` or `run=false` to $GITHUB_OUTPUT.
#
# Usage: detect-changes.sh [mode] [base_sha] [head_sha]
#   mode     - "code", "docs", or a custom `grep -E` pattern (default: code)
#   base_sha - diff base (default: $BASE_SHA, else merge-base with origin/main)
#   head_sha - diff head (default: $HEAD_SHA, else current HEAD)
#
# Fails open: whenever the base or head commit cannot be resolved, it emits
# run=true so the job runs rather than being silently skipped.
set -euo pipefail
mode="${1:-code}"
base_sha="${2:-${BASE_SHA:-}}"
head_sha="${3:-${HEAD_SHA:-}}"
if [ -z "${GITHUB_OUTPUT:-}" ]; then
  echo "GITHUB_OUTPUT is not set." >&2
  exit 1
fi
# Default the head to the current checkout when not provided.
if [ -z "$head_sha" ]; then
  head_sha="$(git rev-parse HEAD 2>/dev/null || true)"
fi
# Default the base to the merge-base with origin/main, fetching it shallowly
# first if the ref is not available in this (possibly shallow) clone.
if [ -z "$base_sha" ]; then
  if ! git rev-parse --verify origin/main >/dev/null 2>&1; then
    git fetch --no-tags --depth=1 origin main || true
  fi
  if git rev-parse --verify origin/main >/dev/null 2>&1 && [ -n "$head_sha" ]; then
    base_sha="$(git merge-base origin/main "$head_sha" 2>/dev/null || true)"
  fi
fi
# Could not resolve both endpoints: fail open.
if [ -z "$base_sha" ] || [ -z "$head_sha" ]; then
  echo "run=true" >> "$GITHUB_OUTPUT"
  exit 0
fi
# An all-zero base SHA means there is no real base commit (e.g. a newly
# created ref in a push event): fail open.
if [ "$base_sha" = "0000000000000000000000000000000000000000" ]; then
  echo "run=true" >> "$GITHUB_OUTPUT"
  exit 0
fi
# The base commit may not exist locally in a shallow clone; try to fetch it,
# and fail open if it still cannot be found.
if ! git cat-file -e "$base_sha" 2>/dev/null; then
  git fetch --no-tags --depth=1 origin "$base_sha" || true
fi
if ! git cat-file -e "$base_sha" 2>/dev/null; then
  echo "run=true" >> "$GITHUB_OUTPUT"
  exit 0
fi
changed_files=$(git diff --name-only "$base_sha" "$head_sha" || true)
# Select the path pattern for the requested mode; any other mode value is
# treated as a literal `grep -E` pattern supplied by the caller.
case "$mode" in
  code)
    pattern='^(src/|tests/|examples/|pyproject.toml$|uv.lock$|Makefile$)'
    ;;
  docs)
    pattern='^(docs/|mkdocs.yml$)'
    ;;
  *)
    pattern="$mode"
    ;;
esac
if echo "$changed_files" | grep -Eq "$pattern"; then
  echo "run=true" >> "$GITHUB_OUTPUT"
else
  echo "run=false" >> "$GITHUB_OUTPUT"
fi
================================================
FILE: .github/scripts/pr_labels.py
================================================
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import json
import os
import pathlib
import subprocess
import sys
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any, Final
# Every label this automation is allowed to apply to a pull request.
ALLOWED_LABELS: Final[set[str]] = {
    "documentation",
    "project",
    "bug",
    "enhancement",
    "dependencies",
    "feature:chat-completions",
    "feature:core",
    "feature:lite-llm",
    "feature:mcp",
    "feature:realtime",
    "feature:sessions",
    "feature:tracing",
    "feature:voice",
}
# Labels that are also derived deterministically from the diff elsewhere in
# the workflow (see .github/codex/prompts/pr-labels.md).
DETERMINISTIC_LABELS: Final[set[str]] = {
    "documentation",
    "project",
    "dependencies",
}
# Intent labels that only the model may propose; never inferred from paths.
MODEL_ONLY_LABELS: Final[set[str]] = {
    "bug",
    "enhancement",
}
# The feature:* labels, which can be inferred from touched source directories.
FEATURE_LABELS: Final[set[str]] = ALLOWED_LABELS - DETERMINISTIC_LABELS - MODEL_ONLY_LABELS
# Source-tree prefixes that map directly to a specific feature label.
SOURCE_FEATURE_PREFIXES: Final[dict[str, tuple[str, ...]]] = {
    "feature:realtime": ("src/agents/realtime/",),
    "feature:voice": ("src/agents/voice/",),
    "feature:mcp": ("src/agents/mcp/",),
    "feature:tracing": ("src/agents/tracing/",),
    "feature:sessions": ("src/agents/memory/", "src/agents/extensions/memory/"),
}
# Paths under src/agents/ that do NOT count as core runtime changes when
# deciding whether to apply feature:core.
CORE_EXCLUDED_PREFIXES: Final[tuple[str, ...]] = (
    "src/agents/realtime/",
    "src/agents/voice/",
    "src/agents/mcp/",
    "src/agents/tracing/",
    "src/agents/memory/",
    "src/agents/extensions/",
    "src/agents/models/",
)
# Default location where the CI workflow writes the PR context JSON.
PR_CONTEXT_DEFAULT_PATH = ".tmp/pr-labels/pr-context.json"
@dataclass(frozen=True)
class PRContext:
    """Immutable snapshot of the PR metadata (title and body) used for labeling."""
    title: str = ""  # PR title; empty when the context file provides none.
    body: str = ""  # PR body; empty when the context file provides none.
def read_file_at(commit: str | None, path: str) -> str | None:
if not commit:
return None
try:
return subprocess.check_output(["git", "show", f"{commit}:{path}"], text=True)
except subprocess.CalledProcessError:
return None
def dependency_lines_for_pyproject(text: str) -> set[int]:
    """Return 1-based line numbers in a pyproject.toml that declare dependencies.

    Counted lines are:
    - every line inside (and including the header of) the
      `[project.optional-dependencies]` and `[dependency-groups]` sections, and
    - the `dependencies = [...]` assignment inside `[project]`, including all
      lines of a multi-line array up to and including its closing bracket.
    """
    dependency_sections = ("project.optional-dependencies", "dependency-groups")
    dependency_lines: set[int] = set()
    current_section: str | None = None
    in_project_dependencies = False
    for line_number, raw_line in enumerate(text.splitlines(), start=1):
        stripped = raw_line.strip()
        is_section_header = stripped.startswith("[") and stripped.endswith("]")
        if is_section_header:
            # `[[...]]` is an array-of-tables header; `[...]` a plain table.
            if stripped.startswith("[[") and stripped.endswith("]]"):
                current_section = stripped[2:-2].strip()
            else:
                current_section = stripped[1:-1].strip()
            in_project_dependencies = False
        # Header and body lines of dependency sections all count. (This single
        # check replaces two identical branches in the previous version.)
        if current_section in dependency_sections:
            dependency_lines.add(line_number)
            continue
        if is_section_header:
            continue
        if current_section != "project":
            continue
        if in_project_dependencies:
            # Inside a multi-line `dependencies = [` array; the closing
            # bracket line is included before the flag is cleared.
            dependency_lines.add(line_number)
            if "]" in stripped:
                in_project_dependencies = False
            continue
        if stripped.startswith("dependencies") and "=" in stripped:
            dependency_lines.add(line_number)
            # An opening `[` without a closing `]` means the array continues
            # onto the following lines.
            if "[" in stripped and "]" not in stripped:
                in_project_dependencies = True
    return dependency_lines
def pyproject_dependency_changed(
    diff_text: str,
    *,
    base_sha: str | None,
    head_sha: str | None,
) -> bool:
    """Return True when the diff touches dependency lines of pyproject.toml.

    Walks the unified diff while tracking the current line number on both
    sides of each hunk, and reports a change when a removed line falls on a
    dependency line of the base file or an added line falls on a dependency
    line of the head file (as classified by `dependency_lines_for_pyproject`).
    """
    import re
    base_text = read_file_at(base_sha, "pyproject.toml")
    head_text = read_file_at(head_sha, "pyproject.toml")
    if base_text is None and head_text is None:
        # pyproject.toml is unavailable on both sides; nothing to compare.
        return False
    base_dependency_lines = dependency_lines_for_pyproject(base_text) if base_text else set()
    head_dependency_lines = dependency_lines_for_pyproject(head_text) if head_text else set()
    in_pyproject = False
    base_line: int | None = None  # current line number in the base file
    head_line: int | None = None  # current line number in the head file
    # Unified-diff hunk header; captures the base and head starting lines.
    hunk_re = re.compile(r"@@ -(\d+)(?:,\d+)? \+(\d+)(?:,\d+)? @@")
    for line in diff_text.splitlines():
        if line.startswith("+++ b/"):
            # Start of a new file's diff; only track pyproject.toml.
            current_file = line[len("+++ b/") :].strip()
            in_pyproject = current_file == "pyproject.toml"
            base_line = None
            head_line = None
            continue
        if not in_pyproject:
            continue
        if line.startswith("@@ "):
            match = hunk_re.match(line)
            if not match:
                continue
            base_line = int(match.group(1))
            head_line = int(match.group(2))
            continue
        if base_line is None or head_line is None:
            # Content before the first hunk header; position is unknown.
            continue
        if line.startswith(" "):
            # Context line: present on both sides, so advance both cursors.
            base_line += 1
            head_line += 1
            continue
        if line.startswith("-"):
            # Removed line: exists only in the base file.
            if base_line in base_dependency_lines:
                return True
            base_line += 1
            continue
        if line.startswith("+"):
            # Added line: exists only in the head file.
            if head_line in head_dependency_lines:
                return True
            head_line += 1
            continue
    return False
def infer_specific_feature_labels(changed_files: Sequence[str]) -> set[str]:
    """Map changed src/ paths to narrowly-scoped feature labels.

    Prefix matches come from SOURCE_FEATURE_PREFIXES; the chat-completions and
    lite-llm labels additionally key off filename substrings under the model
    directories.
    """
    source_files = [path for path in changed_files if path.startswith("src/")]
    labels: set[str] = set()
    for label, prefixes in SOURCE_FEATURE_PREFIXES.items():
        matched = any(
            path.startswith(prefix) for prefix in prefixes for path in source_files
        )
        if matched:
            labels.add(label)
    model_dirs = ("src/agents/models/", "src/agents/extensions/models/")
    for path in source_files:
        if not path.startswith(model_dirs):
            continue
        if "chatcmpl" in path or "chatcompletions" in path:
            labels.add("feature:chat-completions")
        if "litellm" in path:
            labels.add("feature:lite-llm")
    return labels
def infer_feature_labels(changed_files: Sequence[str]) -> set[str]:
    """Infer feature labels, collapsing to feature:core for broad core changes.

    When core src/agents/ code (outside CORE_EXCLUDED_PREFIXES) changed and
    the specific inference did not land on exactly one label, the result is
    just {"feature:core"}.
    """
    source_files = [path for path in changed_files if path.startswith("src/")]
    specific_labels = infer_specific_feature_labels(source_files)

    def _touches_core(path: str) -> bool:
        return path.startswith("src/agents/") and not path.startswith(CORE_EXCLUDED_PREFIXES)

    if any(_touches_core(path) for path in source_files) and len(specific_labels) != 1:
        return {"feature:core"}
    return specific_labels
def infer_fallback_labels(changed_files: Sequence[str]) -> set[str]:
    # Fallback used when Codex output is unavailable or invalid: rely purely
    # on path-based feature inference.
    return infer_feature_labels(changed_files)
def load_json(path: pathlib.Path) -> Any:
    """Parse and return the JSON document stored at ``path``."""
    with path.open() as handle:
        return json.load(handle)
def load_pr_context(path: pathlib.Path) -> PRContext:
    """Load PR title/body metadata, returning an empty context on any bad input."""
    if not path.exists():
        return PRContext()
    try:
        payload = load_json(path)
    except json.JSONDecodeError:
        return PRContext()
    if not isinstance(payload, dict):
        return PRContext()
    raw_title = payload.get("title", "")
    raw_body = payload.get("body", "")
    # Non-string values are treated the same as missing ones.
    title = raw_title if isinstance(raw_title, str) else ""
    body = raw_body if isinstance(raw_body, str) else ""
    return PRContext(title=title, body=body)
def load_codex_labels(path: pathlib.Path) -> tuple[list[str], bool]:
    """Load the Codex label list from ``path``.

    Returns ``(labels, valid)``; ``valid`` is False when the file is missing,
    empty, malformed JSON, or not shaped like ``{"labels": [str, ...]}``.
    """
    if not path.exists():
        return [], False
    # Read the file once so the emptiness check and the JSON parse see the
    # same bytes (the original read the file a second time via load_json,
    # racing any concurrent writer).
    raw = path.read_text().strip()
    if not raw:
        return [], False
    try:
        payload = json.loads(raw)
    except json.JSONDecodeError:
        return [], False
    if not isinstance(payload, dict):
        return [], False
    labels = payload.get("labels")
    if not isinstance(labels, list):
        return [], False
    if not all(isinstance(label, str) for label in labels):
        return [], False
    return list(labels), True
def fetch_existing_labels(pr_number: str) -> set[str]:
    """Return the label names currently applied to the PR via the ``gh`` CLI."""
    output = subprocess.check_output(
        ["gh", "pr", "view", pr_number, "--json", "labels", "--jq", ".labels[].name"],
        text=True,
    )
    return {name for name in output.strip().splitlines() if name}
def infer_title_intent_labels(pr_context: PRContext) -> set[str]:
    """Derive a bug/enhancement intent label from the PR title prefix."""
    title = pr_context.title.strip().lower()
    # Checked in order: bug-style prefixes win over enhancement-style ones.
    intent_prefixes = {
        "bug": ("fix:", "fix(", "bug:", "bugfix:", "hotfix:", "regression:"),
        "enhancement": ("feat:", "feat(", "feature:", "enhancement:"),
    }
    for label, prefixes in intent_prefixes.items():
        if title.startswith(prefixes):
            return {label}
    return set()
def compute_desired_labels(
    *,
    pr_context: PRContext,
    changed_files: Sequence[str],
    diff_text: str,
    codex_ran: bool,
    codex_output_valid: bool,
    codex_labels: Sequence[str],
    base_sha: str | None,
    head_sha: str | None,
) -> set[str]:
    """Assemble the full set of labels this PR should carry.

    Deterministic labels (project/documentation/dependencies) come from the
    changed paths and diff. Feature labels come from Codex when its output is
    trusted and non-empty, otherwise from path-based inference. Intent labels
    come from the title prefix, falling back to Codex's model-only labels.
    """
    desired: set[str] = set()
    recognized = {label for label in codex_labels if label in ALLOWED_LABELS}
    codex_feature_labels = recognized & FEATURE_LABELS
    codex_intent_labels = recognized & MODEL_ONLY_LABELS
    codex_trusted = codex_ran and codex_output_valid
    fallback_feature_labels = infer_fallback_labels(changed_files)
    title_intent_labels = infer_title_intent_labels(pr_context)

    pyproject_touched = "pyproject.toml" in changed_files
    if pyproject_touched:
        desired.add("project")
    if any(path.startswith("docs/") for path in changed_files):
        desired.add("documentation")

    dependencies_allowed = "uv.lock" in changed_files
    if pyproject_touched and pyproject_dependency_changed(
        diff_text, base_sha=base_sha, head_sha=head_sha
    ):
        dependencies_allowed = True
    if dependencies_allowed:
        desired.add("dependencies")

    if codex_trusted and codex_feature_labels:
        desired |= codex_feature_labels
    else:
        desired |= fallback_feature_labels

    if title_intent_labels:
        desired |= title_intent_labels
    elif codex_trusted:
        desired |= codex_intent_labels
    return desired
def compute_managed_labels(
    *,
    pr_context: PRContext,
    codex_ran: bool,
    codex_output_valid: bool,
    codex_labels: Sequence[str],
) -> set[str]:
    """Return the label namespace this automation is allowed to add/remove.

    Model-only (intent) labels are managed only when an intent signal exists:
    a recognized title prefix, or trusted Codex output containing one.
    """
    managed = DETERMINISTIC_LABELS | FEATURE_LABELS
    has_title_intent = bool(infer_title_intent_labels(pr_context))
    codex_intent = {label for label in codex_labels if label in MODEL_ONLY_LABELS}
    codex_supplied_intent = codex_ran and codex_output_valid and bool(codex_intent)
    if has_title_intent or codex_supplied_intent:
        managed |= MODEL_ONLY_LABELS
    return managed
def parse_args(argv: Sequence[str] | None = None) -> argparse.Namespace:
    """Parse CLI flags, defaulting each from its workflow environment variable."""
    env = os.environ
    parser = argparse.ArgumentParser()
    parser.add_argument("--pr-number", default=env.get("PR_NUMBER", ""))
    parser.add_argument("--base-sha", default=env.get("PR_BASE_SHA", ""))
    parser.add_argument("--head-sha", default=env.get("PR_HEAD_SHA", ""))
    parser.add_argument(
        "--codex-output-path",
        default=env.get("CODEX_OUTPUT_PATH", ".tmp/codex/outputs/pr-labels.json"),
    )
    parser.add_argument("--codex-conclusion", default=env.get("CODEX_CONCLUSION", ""))
    parser.add_argument(
        "--pr-context-path",
        default=env.get("PR_CONTEXT_PATH", PR_CONTEXT_DEFAULT_PATH),
    )
    parser.add_argument(
        "--changed-files-path",
        default=env.get("CHANGED_FILES_PATH", ".tmp/pr-labels/changed-files.txt"),
    )
    parser.add_argument(
        "--changes-diff-path",
        default=env.get("CHANGES_DIFF_PATH", ".tmp/pr-labels/changes.diff"),
    )
    return parser.parse_args(argv)
def main(argv: Sequence[str] | None = None) -> int:
    """Entry point: compute and apply PR labels via the ``gh`` CLI.

    Reads the changed-file list, unified diff, PR context, and Codex output
    from the configured paths, computes the desired label set, and edits the
    PR so that only labels in the managed namespace are added or removed.
    """
    args = parse_args(argv)
    if not args.pr_number:
        raise SystemExit("Missing PR number.")
    changed_files_path = pathlib.Path(args.changed_files_path)
    changes_diff_path = pathlib.Path(args.changes_diff_path)
    codex_output_path = pathlib.Path(args.codex_output_path)
    pr_context_path = pathlib.Path(args.pr_context_path)
    codex_conclusion = args.codex_conclusion.strip().lower()
    # An empty or "skipped" conclusion means the Codex step did not run.
    codex_ran = bool(codex_conclusion) and codex_conclusion != "skipped"
    pr_context = load_pr_context(pr_context_path)
    changed_files: list[str] = []
    if changed_files_path.exists():
        changed_files = [
            line.strip() for line in changed_files_path.read_text().splitlines() if line.strip()
        ]
    diff_text = changes_diff_path.read_text() if changes_diff_path.exists() else ""
    codex_labels, codex_output_valid = load_codex_labels(codex_output_path)
    if codex_ran and not codex_output_valid:
        print(
            "Codex output missing or invalid; using fallback feature labels and preserving "
            "model-only labels."
        )
    desired = compute_desired_labels(
        pr_context=pr_context,
        changed_files=changed_files,
        diff_text=diff_text,
        codex_ran=codex_ran,
        codex_output_valid=codex_output_valid,
        codex_labels=codex_labels,
        base_sha=args.base_sha or None,
        head_sha=args.head_sha or None,
    )
    existing = fetch_existing_labels(args.pr_number)
    managed_labels = compute_managed_labels(
        pr_context=pr_context,
        codex_ran=codex_ran,
        codex_output_valid=codex_output_valid,
        codex_labels=codex_labels,
    )
    # Only remove labels inside the managed namespace so manually-applied
    # labels outside it are never touched.
    to_add = sorted(desired - existing)
    to_remove = sorted((existing & managed_labels) - desired)
    if not to_add and not to_remove:
        print("Labels already up to date.")
        return 0
    cmd = ["gh", "pr", "edit", args.pr_number]
    if to_add:
        cmd += ["--add-label", ",".join(to_add)]
    if to_remove:
        cmd += ["--remove-label", ",".join(to_remove)]
    subprocess.check_call(cmd)
    return 0
if __name__ == "__main__":
    # Propagate the exit code to the calling workflow step.
    sys.exit(main())
================================================
FILE: .github/scripts/run-asyncio-teardown-stability.sh
================================================
#!/usr/bin/env bash
# Repeatedly run the asyncio-teardown-sensitive test selections to surface
# flaky failures. Usage: run-asyncio-teardown-stability.sh [repeat_count]
set -euo pipefail

repeat_count="${1:-5}"

asyncio_progress_args=(
  tests/test_asyncio_progress.py
)
run_step_execution_args=(
  tests/test_run_step_execution.py
  -k
  "cancel or post_invoke"
)

for ((run = 1; run <= repeat_count; run++)); do
  echo "Async teardown stability run ${run}/${repeat_count}"
  uv run pytest -q "${asyncio_progress_args[@]}"
  uv run pytest -q "${run_step_execution_args[@]}"
done
================================================
FILE: .github/scripts/select-release-milestone.py
================================================
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import json
import os
import re
import subprocess
import sys
from urllib import error, request
def warn(message: str) -> None:
    """Emit a diagnostic line on stderr (milestone selection is best-effort)."""
    sys.stderr.write(f"{message}\n")
def parse_version(value: str | None) -> tuple[int, int, int] | None:
if not value:
return None
match = re.match(r"^v?(\d+)\.(\d+)(?:\.(\d+))?", value)
if not match:
return None
major = int(match.group(1))
minor = int(match.group(2))
patch = int(match.group(3) or 0)
return major, minor, patch
def latest_tag_version(exclude_version: tuple[int, int, int] | None) -> tuple[int, int, int] | None:
    """Return the highest parsed v* tag version, skipping ``exclude_version``.

    Returns None (after warning) when tags cannot be listed, or when no
    parseable candidate remains.
    """
    try:
        tag_output = subprocess.check_output(["git", "tag", "--list", "v*"], text=True)
    except Exception as exc:
        warn(f"Milestone assignment skipped (failed to list tags: {exc}).")
        return None
    candidates: list[tuple[int, int, int]] = []
    for tag in tag_output.splitlines():
        parsed = parse_version(tag)
        if parsed is None:
            continue
        if exclude_version and parsed == exclude_version:
            continue
        candidates.append(parsed)
    return max(candidates) if candidates else None
def classify_bump(
target: tuple[int, int, int] | None,
previous: tuple[int, int, int] | None,
) -> str | None:
if not target or not previous:
return None
if target < previous:
warn("Milestone assignment skipped (release version is behind latest tag).")
return None
if target[0] != previous[0]:
return "major"
if target[1] != previous[1]:
return "minor"
return "patch"
def parse_milestone_title(title: str | None) -> tuple[int, int] | None:
if not title:
return None
match = re.match(r"^(\d+)\.(\d+)\.x$", title)
if not match:
return None
return int(match.group(1)), int(match.group(2))
def fetch_open_milestones(owner: str, repo: str, token: str) -> list[dict]:
    """Fetch up to 100 open milestones via the GitHub REST API.

    Returns an empty list (after warning) on any HTTP or network failure.
    """
    url = f"https://api.github.com/repos/{owner}/{repo}/milestones?state=open&per_page=100"
    api_request = request.Request(
        url,
        headers={
            "Accept": "application/vnd.github+json",
            "Authorization": f"Bearer {token}",
        },
    )
    try:
        with request.urlopen(api_request) as response:
            return json.load(response)
    except error.HTTPError as exc:
        warn(f"Milestone assignment skipped (failed to list milestones: {exc.code}).")
    except Exception as exc:
        warn(f"Milestone assignment skipped (failed to list milestones: {exc}).")
    return []
def select_milestone(milestones: list[dict], required_bump: str) -> str | None:
    """Choose the open "X.Y.x" milestone title matching ``required_bump``.

    Patch releases target the lowest open milestone of the lowest major line,
    minor releases the next milestone in that line (falling back to the patch
    target), and major releases the first milestone of the next major line.
    """
    entries: list[dict] = []
    for milestone in milestones:
        version_pair = parse_milestone_title(milestone.get("title"))
        if not version_pair:
            continue
        entries.append(
            {"milestone": milestone, "major": version_pair[0], "minor": version_pair[1]}
        )
    entries.sort(key=lambda entry: (entry["major"], entry["minor"]))
    if not entries:
        warn("Milestone assignment skipped (no open milestones matching X.Y.x).")
        return None
    major_numbers = sorted({entry["major"] for entry in entries})
    lowest_major_entries = [e for e in entries if e["major"] == major_numbers[0]]
    patch_target = lowest_major_entries[0]
    minor_target = (
        lowest_major_entries[1] if len(lowest_major_entries) > 1 else patch_target
    )
    major_target = None
    if len(major_numbers) > 1:
        upcoming = [e for e in entries if e["major"] == major_numbers[1]]
        if upcoming:
            major_target = upcoming[0]
    selected = {"major": major_target, "minor": minor_target}.get(
        required_bump, patch_target
    )
    if not selected:
        warn("Milestone assignment skipped (not enough open milestones for selection).")
        return None
    return selected["milestone"].get("title")
def main() -> int:
    """Print the milestone title matching this release's bump type.

    Always exits 0: milestone selection is best-effort, and every failure
    path warns on stderr and prints nothing instead of failing the workflow.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version (e.g., 0.6.6).")
    parser.add_argument(
        "--required-bump",
        choices=("major", "minor", "patch"),
        help="Override bump type (major/minor/patch).",
    )
    parser.add_argument("--repo", help="GitHub repository (owner/repo).")
    parser.add_argument("--token", help="GitHub token.")
    args = parser.parse_args()
    required_bump = args.required_bump
    if not required_bump:
        # Derive the bump type by comparing the release version against the
        # latest existing tag (excluding a tag for this same version).
        target_version = parse_version(args.version)
        if not target_version:
            warn("Milestone assignment skipped (missing or invalid release version).")
            return 0
        previous_version = latest_tag_version(target_version)
        required_bump = classify_bump(target_version, previous_version)
        if not required_bump:
            warn("Milestone assignment skipped (unable to determine required bump).")
            return 0
    token = args.token or os.environ.get("GITHUB_TOKEN") or os.environ.get("GH_TOKEN")
    if not token:
        warn("Milestone assignment skipped (missing GitHub token).")
        return 0
    repo = args.repo or os.environ.get("GITHUB_REPOSITORY")
    if not repo or "/" not in repo:
        warn("Milestone assignment skipped (missing repository info).")
        return 0
    owner, name = repo.split("/", 1)
    milestones = fetch_open_milestones(owner, name, token)
    if not milestones:
        return 0
    milestone_title = select_milestone(milestones, required_bump)
    if milestone_title:
        # The workflow captures stdout as the milestone name.
        print(milestone_title)
    return 0
if __name__ == "__main__":
    # Propagate the exit code to the calling workflow step.
    sys.exit(main())
================================================
FILE: .github/workflows/docs.yml
================================================
name: Deploy docs
on:
  push:
    branches:
      - main
    paths:
      - "docs/**"
      - "mkdocs.yml"
permissions:
  contents: write # This allows pushing to gh-pages
jobs:
  deploy_docs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd
      # If the push range contains any non-docs file, set skip=true so the
      # remaining steps (and the deploy) are bypassed.
      - name: Determine docs-only push
        id: docs-only
        run: |
          if [ "${{ github.event_name }}" != "push" ]; then
            echo "skip=false" >> "$GITHUB_OUTPUT"
            exit 0
          fi
          set -euo pipefail
          before="${{ github.event.before }}"
          sha="${{ github.sha }}"
          changed_files=$(git diff --name-only "$before" "$sha" || true)
          non_docs=$(echo "$changed_files" | grep -vE '^(docs/|mkdocs.yml$)' || true)
          if [ -n "$non_docs" ]; then
            echo "skip=true" >> "$GITHUB_OUTPUT"
          else
            echo "skip=false" >> "$GITHUB_OUTPUT"
          fi
      - name: Setup uv
        if: steps.docs-only.outputs.skip != 'true'
        uses: astral-sh/setup-uv@5a095e7a2014a4212f075830d4f7277575a9d098
        with:
          enable-cache: true
      - name: Install dependencies
        if: steps.docs-only.outputs.skip != 'true'
        run: make sync
      - name: Deploy docs
        if: steps.docs-only.outputs.skip != 'true'
        run: make deploy-docs
================================================
FILE: .github/workflows/issues.yml
================================================
name: Close inactive issues
on:
  schedule:
    # Daily at 01:30 UTC.
    - cron: "30 1 * * *"
jobs:
  close-issues:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write
    steps:
      - uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f
        with:
          # Issues: only those labeled question/needs-more-info are eligible;
          # stale after 7 days, closed 3 days later, exempt via skip-stale.
          days-before-issue-stale: 7
          days-before-issue-close: 3
          stale-issue-label: "stale"
          exempt-issue-labels: "skip-stale"
          stale-issue-message: "This issue is stale because it has been open for 7 days with no activity."
          close-issue-message: "This issue was closed because it has been inactive for 3 days since being marked as stale."
          any-of-issue-labels: 'question,needs-more-info'
          # PRs: stale after 10 days, closed 7 days later, exempt via skip-stale.
          days-before-pr-stale: 10
          days-before-pr-close: 7
          stale-pr-label: "stale"
          exempt-pr-labels: "skip-stale"
          stale-pr-message: "This PR is stale because it has been open for 10 days with no activity."
          close-pr-message: "This PR was closed because it has been inactive for 7 days since being marked as stale."
          repo-token: ${{ secrets.GITHUB_TOKEN }}
================================================
FILE: .github/workflows/pr-labels.yml
================================================
name: Auto label PRs
on:
  pull_request_target:
    types:
      - opened
      - reopened
      - synchronize
      - ready_for_review
  workflow_dispatch:
    inputs:
      pr_number:
        description: "PR number to label."
        required: true
        type: number
permissions:
  contents: read
  issues: write
  pull-requests: write
jobs:
  label:
    runs-on: ubuntu-latest
    steps:
      - name: Ensure main workflow
        if: ${{ github.event_name == 'workflow_dispatch' && github.ref != 'refs/heads/main' }}
        run: |
          echo "This workflow must be dispatched from main."
          exit 1
      # Resolve the PR (from the event payload, or via the API for manual
      # dispatch) and expose its number, SHAs, fork status, title, and body.
      - name: Resolve PR context
        id: pr
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd
        env:
          MANUAL_PR_NUMBER: ${{ inputs.pr_number || '' }}
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            const isManual = context.eventName === 'workflow_dispatch';
            let pr;
            if (isManual) {
              const prNumber = Number(process.env.MANUAL_PR_NUMBER);
              if (!prNumber) {
                core.setFailed('workflow_dispatch requires pr_number input.');
                return;
              }
              const { data } = await github.rest.pulls.get({
                owner: context.repo.owner,
                repo: context.repo.repo,
                pull_number: prNumber,
              });
              pr = data;
            } else {
              pr = context.payload.pull_request;
            }
            if (!pr) {
              core.setFailed('Missing pull request context.');
              return;
            }
            const headRepo = pr.head.repo.full_name;
            const repoFullName = `${context.repo.owner}/${context.repo.repo}`;
            core.setOutput('pr_number', pr.number);
            core.setOutput('base_sha', pr.base.sha);
            core.setOutput('head_sha', pr.head.sha);
            core.setOutput('head_repo', headRepo);
            core.setOutput('is_fork', headRepo !== repoFullName);
            core.setOutput('title', pr.title || '');
            core.setOutput('body', pr.body || '');
      - name: Checkout base
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd
        with:
          fetch-depth: 0
          ref: ${{ steps.pr.outputs.base_sha }}
      - name: Fetch PR head
        env:
          PR_HEAD_REPO: ${{ steps.pr.outputs.head_repo }}
          PR_HEAD_SHA: ${{ steps.pr.outputs.head_sha }}
        run: |
          set -euo pipefail
          git fetch --no-tags --prune --recurse-submodules=no \
            "https://github.com/${PR_HEAD_REPO}.git" \
            "${PR_HEAD_SHA}"
      # Write changed-files.txt, changes.diff, and pr-context.json under
      # .tmp/pr-labels for pr_labels.py to consume.
      - name: Collect PR diff
        id: diff
        env:
          PR_BASE_SHA: ${{ steps.pr.outputs.base_sha }}
          PR_HEAD_SHA: ${{ steps.pr.outputs.head_sha }}
          PR_TITLE: ${{ steps.pr.outputs.title }}
          PR_BODY: ${{ steps.pr.outputs.body }}
        run: |
          set -euo pipefail
          mkdir -p .tmp/pr-labels
          diff_base_sha="$(git merge-base "$PR_BASE_SHA" "$PR_HEAD_SHA")"
          echo "diff_base_sha=${diff_base_sha}" >> "$GITHUB_OUTPUT"
          git diff --name-only "$diff_base_sha" "$PR_HEAD_SHA" > .tmp/pr-labels/changed-files.txt
          git diff "$diff_base_sha" "$PR_HEAD_SHA" > .tmp/pr-labels/changes.diff
          python - <<'PY'
          import json
          import os
          import pathlib
          pathlib.Path(".tmp/pr-labels/pr-context.json").write_text(
              json.dumps(
                  {
                      "title": os.environ.get("PR_TITLE", ""),
                      "body": os.environ.get("PR_BODY", ""),
                  },
                  ensure_ascii=False,
                  indent=2,
              )
              + "\n"
          )
          PY
      - name: Prepare Codex output
        id: codex-output
        run: |
          set -euo pipefail
          output_dir=".tmp/codex/outputs"
          output_file="${output_dir}/pr-labels.json"
          mkdir -p "$output_dir"
          echo "output_file=${output_file}" >> "$GITHUB_OUTPUT"
      # Codex labeling is skipped for fork PRs (unless manually dispatched)
      # and for dependabot PRs.
      - name: Run Codex labeling
        id: run_codex
        if: ${{ (github.event_name == 'workflow_dispatch' || steps.pr.outputs.is_fork != 'true') && github.actor != 'dependabot[bot]' }}
        uses: openai/codex-action@086169432f1d2ab2f4057540b1754d550f6a1189
        with:
          openai-api-key: ${{ secrets.PROD_OPENAI_API_KEY }}
          prompt-file: .github/codex/prompts/pr-labels.md
          output-file: ${{ steps.codex-output.outputs.output_file }}
          output-schema-file: .github/codex/schemas/pr-labels.json
          # Keep the legacy Linux sandbox path until the default bubblewrap path
          # works reliably on GitHub-hosted Ubuntu runners.
          codex-args: '["--enable","use_legacy_landlock"]'
          safety-strategy: drop-sudo
          sandbox: read-only
      - name: Apply labels
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          PR_NUMBER: ${{ steps.pr.outputs.pr_number }}
          PR_BASE_SHA: ${{ steps.diff.outputs.diff_base_sha }}
          PR_HEAD_SHA: ${{ steps.pr.outputs.head_sha }}
          CODEX_OUTPUT_PATH: ${{ steps.codex-output.outputs.output_file }}
          CODEX_CONCLUSION: ${{ steps.run_codex.conclusion }}
        run: |
          python .github/scripts/pr_labels.py
      # On manual runs, create or update a single marker comment if the job failed.
      - name: Comment on manual run failure
        if: ${{ github.event_name == 'workflow_dispatch' && always() }}
        uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd
        env:
          PR_NUMBER: ${{ steps.pr.outputs.pr_number }}
          JOB_STATUS: ${{ job.status }}
          RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
          CODEX_CONCLUSION: ${{ steps.run_codex.conclusion }}
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            const marker = '<!-- pr-labels-manual-run -->';
            const jobStatus = process.env.JOB_STATUS;
            if (jobStatus === 'success') {
              return;
            }
            const prNumber = Number(process.env.PR_NUMBER);
            if (!prNumber) {
              core.setFailed('Missing PR number for manual run comment.');
              return;
            }
            const body = [
              marker,
              'Manual PR labeling failed.',
              `Job status: ${jobStatus}.`,
              `Run: ${process.env.RUN_URL}.`,
              `Codex labeling: ${process.env.CODEX_CONCLUSION}.`,
            ].join('\n');
            const { data: comments } = await github.rest.issues.listComments({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: prNumber,
              per_page: 100,
            });
            const existing = comments.find(
              (comment) =>
                comment.user?.login === 'github-actions[bot]' &&
                comment.body?.includes(marker),
            );
            if (existing) {
              await github.rest.issues.updateComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                comment_id: existing.id,
                body,
              });
              core.info(`Updated existing comment ${existing.id}`);
              return;
            }
            const { data: created } = await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: prNumber,
              body,
            });
            core.info(`Created comment ${created.id}`);
================================================
FILE: .github/workflows/publish.yml
================================================
name: Publish to PyPI
on:
  release:
    types:
      - published
permissions:
  contents: read
jobs:
  publish:
    environment:
      name: pypi
      url: https://pypi.org/p/openai-agents
    permissions:
      id-token: write # Important for trusted publishing to PyPI
    runs-on: ubuntu-latest
    env:
      OPENAI_API_KEY: fake-for-tests
    steps:
      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd
      - name: Setup uv
        uses: astral-sh/setup-uv@5a095e7a2014a4212f075830d4f7277575a9d098
        with:
          enable-cache: true
      - name: Install dependencies
        run: make sync
      - name: Build package
        run: uv build
      # Uploads the built distributions via OIDC trusted publishing.
      - name: Publish to PyPI
        uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e
================================================
FILE: .github/workflows/release-pr-update.yml
================================================
name: Update release PR on main updates
on:
  push:
    branches:
      - main
concurrency:
  group: release-pr-update
  cancel-in-progress: true
permissions:
  contents: write
  pull-requests: write
jobs:
  update-release-pr:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd
        with:
          fetch-depth: 0
      - name: Fetch tags
        run: git fetch origin --tags --prune
      - name: Configure git
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
      # Locate the single open same-repo release/v* PR; fail if several exist.
      - name: Find release PR
        id: find
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          set -euo pipefail
          base_branch="main"
          prs_json="$(gh pr list \
            --base "$base_branch" \
            --state open \
            --search "head:release/v" \
            --limit 200 \
            --json number,headRefName,isCrossRepository,headRepositoryOwner)"
          count="$(echo "$prs_json" | jq '[.[] | select(.isCrossRepository == false) | select(.headRefName|startswith("release/v"))] | length')"
          if [ "$count" -eq 0 ]; then
            echo "found=false" >> "$GITHUB_OUTPUT"
            exit 0
          fi
          if [ "$count" -gt 1 ]; then
            echo "Multiple release PRs found; expected a single release PR." >&2
            exit 1
          fi
          number="$(echo "$prs_json" | jq -r '.[] | select(.isCrossRepository == false) | select(.headRefName|startswith("release/v")) | .number')"
          branch="$(echo "$prs_json" | jq -r '.[] | select(.isCrossRepository == false) | select(.headRefName|startswith("release/v")) | .headRefName')"
          echo "found=true" >> "$GITHUB_OUTPUT"
          echo "number=$number" >> "$GITHUB_OUTPUT"
          echo "branch=$branch" >> "$GITHUB_OUTPUT"
      - name: Rebase release branch
        if: steps.find.outputs.found == 'true'
        env:
          RELEASE_BRANCH: ${{ steps.find.outputs.branch }}
        run: |
          set -euo pipefail
          git fetch origin main "$RELEASE_BRANCH"
          git checkout -B "$RELEASE_BRANCH" "origin/$RELEASE_BRANCH"
          git rebase origin/main
      - name: Prepare Codex output
        if: steps.find.outputs.found == 'true'
        id: codex-output
        run: |
          set -euo pipefail
          output_dir=".tmp/codex/outputs"
          output_file="${output_dir}/release-review.md"
          mkdir -p "$output_dir"
          echo "output_file=${output_file}" >> "$GITHUB_OUTPUT"
      - name: Run Codex release review
        if: steps.find.outputs.found == 'true'
        uses: openai/codex-action@086169432f1d2ab2f4057540b1754d550f6a1189
        with:
          openai-api-key: ${{ secrets.PROD_OPENAI_API_KEY }}
          prompt-file: .github/codex/prompts/release-review.md
          output-file: ${{ steps.codex-output.outputs.output_file }}
          # Keep the legacy Linux sandbox path until the default bubblewrap path
          # works reliably on GitHub-hosted Ubuntu runners.
          codex-args: '["--enable","use_legacy_landlock"]'
          safety-strategy: drop-sudo
          sandbox: read-only
      # Push the rebased branch, refresh the PR body with the Codex review,
      # and best-effort apply the project label and milestone.
      - name: Update PR body and push
        if: steps.find.outputs.found == 'true'
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          PR_NUMBER: ${{ steps.find.outputs.number }}
          RELEASE_BRANCH: ${{ steps.find.outputs.branch }}
          RELEASE_REVIEW_PATH: ${{ steps.codex-output.outputs.output_file }}
        run: |
          set -euo pipefail
          git push --force-with-lease origin "$RELEASE_BRANCH"
          gh pr edit "$PR_NUMBER" --body-file "$RELEASE_REVIEW_PATH"
          version="${RELEASE_BRANCH#release/v}"
          milestone_name="$(python .github/scripts/select-release-milestone.py --version "$version")"
          if [ -n "$milestone_name" ]; then
            if ! gh pr edit "$PR_NUMBER" --add-label "project" --milestone "$milestone_name"; then
              echo "PR label/milestone update failed; continuing without changes." >&2
            fi
          else
            if ! gh pr edit "$PR_NUMBER" --add-label "project"; then
              echo "PR label update failed; continuing without changes." >&2
            fi
          fi
================================================
FILE: .github/workflows/release-pr.yml
================================================
name: Create release PR
on:
  workflow_dispatch:
    inputs:
      version:
        description: "Version to release (e.g., 0.6.6)"
        required: true
permissions:
  contents: write
  pull-requests: write
jobs:
  release-pr:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd
        with:
          fetch-depth: 0
          ref: main
      - name: Setup uv
        uses: astral-sh/setup-uv@5a095e7a2014a4212f075830d4f7277575a9d098
        with:
          enable-cache: true
      - name: Fetch tags
        run: git fetch origin --tags --prune
      - name: Ensure release branch does not exist
        env:
          RELEASE_VERSION: ${{ inputs.version }}
        run: |
          branch="release/v${RELEASE_VERSION}"
          if git ls-remote --exit-code --heads origin "$branch" >/dev/null 2>&1; then
            echo "Branch $branch already exists on origin." >&2
            exit 1
          fi
      # Validate the requested version string and rewrite the single
      # version = "..." line in pyproject.toml.
      - name: Update version
        env:
          RELEASE_VERSION: ${{ inputs.version }}
        run: |
          python - <<'PY'
          import os
          import pathlib
          import re
          import sys
          version = os.environ["RELEASE_VERSION"]
          if version.startswith("v"):
              print("Version must not start with 'v' (use x.y.z...).", file=sys.stderr)
              sys.exit(1)
          if ".." in version:
              print("Version contains consecutive dots (use x.y.z...).", file=sys.stderr)
              sys.exit(1)
          if not re.match(r"^\d+\.\d+(\.\d+)*([a-zA-Z0-9\.-]+)?$", version):
              print(
                  "Version must be semver-like (e.g., 0.6.6, 0.6.6-rc1, 0.6.6.dev1).",
                  file=sys.stderr,
              )
              sys.exit(1)
          path = pathlib.Path("pyproject.toml")
          text = path.read_text()
          updated, count = re.subn(
              r'(?m)^version\s*=\s*"[^\"]+"',
              f'version = "{version}"',
              text,
          )
          if count != 1:
              print("Expected to update exactly one version line.", file=sys.stderr)
              sys.exit(1)
          if updated == text:
              print("Version already set; no changes made.", file=sys.stderr)
              sys.exit(1)
          path.write_text(updated)
          PY
      - name: Sync dependencies
        run: make sync
      - name: Configure git
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
      - name: Create release branch and commit
        env:
          RELEASE_VERSION: ${{ inputs.version }}
        run: |
          branch="release/v${RELEASE_VERSION}"
          git checkout -b "$branch"
          git add pyproject.toml uv.lock
          if git diff --cached --quiet; then
            echo "No changes to commit." >&2
            exit 1
          fi
          git commit -m "Bump version to ${RELEASE_VERSION}"
          git push --set-upstream origin "$branch"
      - name: Prepare Codex output
        id: codex-output
        run: |
          set -euo pipefail
          output_dir=".tmp/codex/outputs"
          output_file="${output_dir}/release-review.md"
          mkdir -p "$output_dir"
          echo "output_file=${output_file}" >> "$GITHUB_OUTPUT"
      - name: Run Codex release review
        uses: openai/codex-action@086169432f1d2ab2f4057540b1754d550f6a1189
        with:
          openai-api-key: ${{ secrets.PROD_OPENAI_API_KEY }}
          prompt-file: .github/codex/prompts/release-review.md
          output-file: ${{ steps.codex-output.outputs.output_file }}
          # Keep the legacy Linux sandbox path until the default bubblewrap path
          # works reliably on GitHub-hosted Ubuntu runners.
          codex-args: '["--enable","use_legacy_landlock"]'
          safety-strategy: drop-sudo
          sandbox: read-only
      - name: Build PR body
        env:
          RELEASE_REVIEW_PATH: ${{ steps.codex-output.outputs.output_file }}
        run: |
          python - <<'PY'
          import os
          import pathlib
          report = pathlib.Path(os.environ["RELEASE_REVIEW_PATH"]).read_text()
          pathlib.Path("pr-body.md").write_text(report)
          PY
      # Create the release PR (or update an existing one), retrying without
      # the label/milestone when applying them fails.
      - name: Create or update PR
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          RELEASE_VERSION: ${{ inputs.version }}
        run: |
          set -euo pipefail
          head_branch="release/v${RELEASE_VERSION}"
          milestone_name="$(python .github/scripts/select-release-milestone.py --version "$RELEASE_VERSION")"
          pr_number="$(gh pr list --head "$head_branch" --base "main" --json number --jq '.[0].number // empty')"
          if [ -z "$pr_number" ]; then
            create_args=(
              --title "Release ${RELEASE_VERSION}"
              --body-file pr-body.md
              --base "main"
              --head "$head_branch"
              --label "project"
            )
            if [ -n "$milestone_name" ]; then
              create_args+=(--milestone "$milestone_name")
            fi
            if ! gh pr create "${create_args[@]}"; then
              echo "PR create with label/milestone failed; retrying without them." >&2
              gh pr create \
                --title "Release ${RELEASE_VERSION}" \
                --body-file pr-body.md \
                --base "main" \
                --head "$head_branch"
            fi
          else
            edit_args=(
              --title "Release ${RELEASE_VERSION}"
              --body-file pr-body.md
              --add-label "project"
            )
            if [ -n "$milestone_name" ]; then
              edit_args+=(--milestone "$milestone_name")
            fi
            if ! gh pr edit "$pr_number" "${edit_args[@]}"; then
              echo "PR edit with label/milestone failed; retrying without them." >&2
              gh pr edit "$pr_number" --title "Release ${RELEASE_VERSION}" --body-file pr-body.md
            fi
          fi
================================================
FILE: .github/workflows/release-tag.yml
================================================
name: Tag release on merge
on:
  pull_request:
    types:
      - closed
    branches:
      - main
permissions:
  contents: write
jobs:
  tag-release:
    # Only runs when a release/v* branch PR actually merged into main.
    if: >-
      github.event.pull_request.merged == true &&
      startsWith(github.event.pull_request.head.ref, 'release/v')
    runs-on: ubuntu-latest
    steps:
      - name: Validate merge commit
        env:
          MERGE_SHA: ${{ github.event.pull_request.merge_commit_sha }}
        run: |
          if [ -z "$MERGE_SHA" ]; then
            echo "merge_commit_sha is empty; refusing to tag to avoid tagging the wrong commit." >&2
            exit 1
          fi
      - name: Checkout merge commit
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd
        with:
          fetch-depth: 0
          ref: ${{ github.event.pull_request.merge_commit_sha }}
      - name: Setup Python
        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405
        with:
          python-version: "3.11"
      - name: Configure git
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
      - name: Fetch tags
        run: git fetch origin --tags --prune
      # Read project.version from pyproject.toml, check it matches the
      # release/vX.Y.Z branch name, and expose it as a step output.
      - name: Resolve version
        id: version
        env:
          HEAD_REF: ${{ github.event.pull_request.head.ref }}
        run: |
          python - <<'PY'
          import os
          import pathlib
          import sys
          import tomllib
          path = pathlib.Path("pyproject.toml")
          data = tomllib.loads(path.read_text())
          version = data.get("project", {}).get("version")
          if not version:
              print("Missing project.version in pyproject.toml.", file=sys.stderr)
              sys.exit(1)
          head_ref = os.environ.get("HEAD_REF", "")
          if head_ref.startswith("release/v"):
              expected = head_ref[len("release/v") :]
              if expected != version:
                  print(
                      f"Version mismatch: branch {expected} vs pyproject {version}.",
                      file=sys.stderr,
                  )
                  sys.exit(1)
          output_path = pathlib.Path(os.environ["GITHUB_OUTPUT"])
          output_path.write_text(f"version={version}\n")
          PY
      - name: Create tag
        env:
          VERSION: ${{ steps.version.outputs.version }}
        run: |
          if git tag -l "v${VERSION}" | grep -q "v${VERSION}"; then
            echo "Tag v${VERSION} already exists; skipping."
            exit 0
          fi
          git tag -a "v${VERSION}" -m "Release v${VERSION}"
          git push origin "v${VERSION}"
================================================
FILE: .github/workflows/tests.yml
================================================
# CI entry point: lint, typecheck, unit tests (per Python version), and a docs
# build. Every job first runs detect-changes.sh so changes that do not touch
# the relevant paths skip the expensive steps while still reporting a check.
name: Tests
on:
  push:
    branches:
      - main
  pull_request:
    # All PRs, including stacked PRs
permissions:
  contents: read
env:
  # Keep uv from re-resolving: install exactly what uv.lock pins.
  UV_FROZEN: "1"
jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd
      - name: Detect code changes
        id: changes
        # Diff against the PR base SHA (or the pre-push SHA) and set the `run`
        # output that gates every later step in this job.
        run: ./.github/scripts/detect-changes.sh code "${{ github.event.pull_request.base.sha || github.event.before }}" "${{ github.sha }}"
      - name: Setup uv
        if: steps.changes.outputs.run == 'true'
        uses: astral-sh/setup-uv@5a095e7a2014a4212f075830d4f7277575a9d098
        with:
          enable-cache: true
      - name: Install dependencies
        if: steps.changes.outputs.run == 'true'
        run: make sync
      - name: Verify formatting
        if: steps.changes.outputs.run == 'true'
        run: make format-check
      - name: Run lint
        if: steps.changes.outputs.run == 'true'
        run: make lint
      - name: Skip lint
        if: steps.changes.outputs.run != 'true'
        run: echo "Skipping lint for non-code changes."
  typecheck:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd
      - name: Detect code changes
        id: changes
        run: ./.github/scripts/detect-changes.sh code "${{ github.event.pull_request.base.sha || github.event.before }}" "${{ github.sha }}"
      - name: Setup uv
        if: steps.changes.outputs.run == 'true'
        uses: astral-sh/setup-uv@5a095e7a2014a4212f075830d4f7277575a9d098
        with:
          enable-cache: true
      - name: Install dependencies
        if: steps.changes.outputs.run == 'true'
        run: make sync
      - name: Run typecheck
        if: steps.changes.outputs.run == 'true'
        run: make typecheck
      - name: Skip typecheck
        if: steps.changes.outputs.run != 'true'
        run: echo "Skipping typecheck for non-code changes."
  tests:
    runs-on: ubuntu-latest
    strategy:
      # Run every Python version to completion even if one of them fails.
      fail-fast: false
      matrix:
        python-version:
          - "3.10"
          - "3.11"
          - "3.12"
          - "3.13"
          - "3.14"
    env:
      # Tests must not hit the real API; a placeholder key satisfies client setup.
      OPENAI_API_KEY: fake-for-tests
    steps:
      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd
      - name: Detect code changes
        id: changes
        run: ./.github/scripts/detect-changes.sh code "${{ github.event.pull_request.base.sha || github.event.before }}" "${{ github.sha }}"
      - name: Setup uv
        if: steps.changes.outputs.run == 'true'
        uses: astral-sh/setup-uv@5a095e7a2014a4212f075830d4f7277575a9d098
        with:
          enable-cache: true
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        if: steps.changes.outputs.run == 'true'
        run: make sync
      # Coverage is collected on exactly one version (3.12); the rest run the
      # plain test suite so the two steps below are mutually exclusive.
      - name: Run tests with coverage
        if: steps.changes.outputs.run == 'true' && matrix.python-version == '3.12'
        run: make coverage
      - name: Run tests
        if: steps.changes.outputs.run == 'true' && matrix.python-version != '3.12'
        run: make tests
      # Extra teardown-stability pass on the oldest and newest supported versions.
      - name: Run async teardown stability tests
        if: steps.changes.outputs.run == 'true' && (matrix.python-version == '3.10' || matrix.python-version == '3.14')
        run: make tests-asyncio-stability
      - name: Skip tests
        if: steps.changes.outputs.run != 'true'
        run: echo "Skipping tests for non-code changes."
  build-docs:
    runs-on: ubuntu-latest
    env:
      OPENAI_API_KEY: fake-for-tests
    steps:
      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd
      - name: Detect docs changes
        id: changes
        # Same gating as the code jobs, but keyed on docs paths instead.
        run: ./.github/scripts/detect-changes.sh docs "${{ github.event.pull_request.base.sha || github.event.before }}" "${{ github.sha }}"
      - name: Setup uv
        if: steps.changes.outputs.run == 'true'
        uses: astral-sh/setup-uv@5a095e7a2014a4212f075830d4f7277575a9d098
        with:
          enable-cache: true
      - name: Install dependencies
        if: steps.changes.outputs.run == 'true'
        run: make sync
      - name: Build docs
        if: steps.changes.outputs.run == 'true'
        run: make build-docs
      - name: Skip docs build
        if: steps.changes.outputs.run != 'true'
        run: echo "Skipping docs build for non-docs changes."
================================================
FILE: .github/workflows/update-docs.yml
================================================
name: "Update Translated Docs"
# This GitHub Actions job automates the process of updating all translated document pages. Please note the following:
# 1. The translation results may vary each time; some differences in detail are expected.
# 2. When you add a new page to the left-hand menu, **make sure to manually update mkdocs.yml** to include the new item.
# 3. If you switch to a different LLM (for example, from o3 to a newer model), be sure to conduct thorough testing before making the switch.
# To add more languages, you will update the following:
# 1. Add '!docs/{lang}/**' to `on.push.paths` in this file
# 2. Update mkdocs.yml to have the new language
# 3. Update docs/scripts/translate_docs.py to have the new language
on:
  push:
    branches:
      - main
    # Trigger on source-docs changes only; generated translations are excluded
    # so this workflow's own output does not retrigger it via path filters.
    paths:
      - 'docs/**'
      - mkdocs.yml
      - '!docs/ja/**'
      - '!docs/ko/**'
      - '!docs/zh/**'
  workflow_dispatch:
    inputs:
      translate_mode:
        description: "Translation mode"
        type: choice
        options:
          - only-changes
          - full
        default: only-changes
permissions:
  contents: write
  pull-requests: write
jobs:
  update-docs:
    # Guard against loops: skip when the triggering commit is this workflow's
    # own translation commit.
    if: "!contains(github.event.head_commit.message, 'Update all translated document pages')"
    name: Build and Push Translated Docs
    runs-on: ubuntu-latest
    timeout-minutes: 30
    env:
      PROD_OPENAI_API_KEY: ${{ secrets.PROD_OPENAI_API_KEY }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd
        with:
          fetch-depth: 0
      - name: Setup uv
        uses: astral-sh/setup-uv@5a095e7a2014a4212f075830d4f7277575a9d098
        with:
          enable-cache: true
      - name: Install dependencies
        run: make sync
      - name: Build translated docs
        # Manual runs can pick the mode; push-triggered runs default to
        # translating only changed pages.
        run: |
          mode="${{ inputs.translate_mode || 'only-changes' }}"
          uv run docs/scripts/translate_docs.py --mode "$mode"
          uv run mkdocs build
      - name: Commit changes
        id: commit
        # Commit only when the translation actually changed something, and
        # record that decision for the PR-creation step below.
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
          git add docs/
          if git diff --cached --quiet; then
            echo "No changes to commit"
            echo "committed=false" >> "$GITHUB_OUTPUT"
          else
            git commit -m "Update all translated document pages"
            echo "committed=true" >> "$GITHUB_OUTPUT"
          fi
      - name: Create Pull Request
        if: steps.commit.outputs.committed == 'true'
        uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0
        with:
          commit-message: "Update translated document pages"
          title: "docs: update translated document pages"
          body: |
            Automated update of translated documentation.
            Triggered by commit: [${{ github.event.head_commit.id }}](${{ github.server_url }}/${{ github.repository }}/commit/${{ github.event.head_commit.id }}).
            Message: `${{ github.event.head_commit.message }}`
          branch: update-translated-docs-${{ github.run_id }}
          delete-branch: true
================================================
FILE: .gitignore
================================================
# macOS Files
.DS_Store
# Byte-compiled / optimized / DLL files
__pycache__/
**/__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
.tmp/
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pdm
.pdm.toml
.pdm-python
.pdm-build/
# PEP 582
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.python-version
.env*
.venv
.venv*
env/
venv/
ENV/
env.bak/
venv.bak/
.venv39
.venv_res
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
.idea/
# Ruff stuff:
.ruff_cache/
# PyPI configuration file
.pypirc
.aider*
# Redis database files
dump.rdb
tmp/
# execplans
plans/
================================================
FILE: .prettierrc
================================================
{
"tabWidth": 4,
"overrides": [
{
"files": "*.yml",
"options": {
"tabWidth": 2
}
}
]
}
================================================
FILE: .vscode/launch.json
================================================
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "Python Debugger: Python File",
"type": "debugpy",
"request": "launch",
"program": "${file}"
}
]
}
================================================
FILE: .vscode/settings.json
================================================
{
"python.testing.pytestArgs": [
"tests"
],
"python.testing.unittestEnabled": false,
"python.testing.pytestEnabled": true
}
================================================
FILE: AGENTS.md
================================================
# Contributor Guide
This guide helps new contributors get started with the OpenAI Agents Python repository. It covers repo structure, how to test your work, available utilities, and guidelines for commits and PRs.
**Location:** `AGENTS.md` at the repository root.
## Table of Contents
1. [Policies & Mandatory Rules](#policies--mandatory-rules)
2. [Project Structure Guide](#project-structure-guide)
3. [Operation Guide](#operation-guide)
## Policies & Mandatory Rules
### Mandatory Skill Usage
#### `$code-change-verification`
Run `$code-change-verification` before marking work complete when changes affect runtime code, tests, or build/test behavior.
Run it when you change:
- `src/agents/` (library code) or shared utilities.
- `tests/` or add or modify snapshot tests.
- `examples/`.
- Build or test configuration such as `pyproject.toml`, `Makefile`, `mkdocs.yml`, `docs/scripts/`, or CI workflows.
You can skip `$code-change-verification` for docs-only or repo-meta changes (for example, `docs/`, `.agents/`, `README.md`, `AGENTS.md`, `.github/`), unless a user explicitly asks to run the full verification stack.
#### `$openai-knowledge`
When working on OpenAI API or OpenAI platform integrations in this repo (Responses API, tools, streaming, Realtime API, auth, models, rate limits, MCP, Agents SDK or ChatGPT Apps SDK), use `$openai-knowledge` to pull authoritative docs via the OpenAI Developer Docs MCP server (and guide setup if it is not configured).
#### `$implementation-strategy`
Before changing runtime code, exported APIs, external configuration, persisted schemas, wire protocols, or other user-facing behavior, use `$implementation-strategy` to decide the compatibility boundary and implementation shape. Judge breaking changes against the latest release tag, not unreleased branch-local churn. Interfaces introduced or changed after the latest release tag may be rewritten without compatibility shims unless they define a released or explicitly supported durable external state boundary, or the user explicitly asks for a migration path. Unreleased persisted formats on `main` may be renumbered or squashed before release when intermediate snapshots are intentionally unsupported.
### ExecPlans
Call out compatibility risk early in your plan only when the change affects behavior shipped in the latest release tag or a released or explicitly supported durable external state boundary, and confirm the approach before implementing changes that could impact users.
Use an ExecPlan when work is multi-step, spans several files, involves new features or refactors, or is likely to take more than about an hour. Start with the template and rules in `PLANS.md`, keep milestones and living sections (Progress, Surprises & Discoveries, Decision Log, Outcomes & Retrospective) up to date as you execute, and rewrite the plan if scope shifts. Call out compatibility risk only when the plan changes behavior shipped in the latest release tag or a released or explicitly supported durable external state boundary. Do not treat branch-local interface churn or unreleased post-tag changes on `main` as breaking by default; prefer direct replacement over compatibility layers in those cases, and renumber or squash unreleased persisted schemas before release when the intermediate snapshots are intentionally unsupported. If you intentionally skip an ExecPlan for a complex task, note why in your response so reviewers understand the choice.
### Public API Positional Compatibility
Treat the parameter and dataclass field order of exported runtime APIs as a compatibility contract.
- For public constructors (for example `RunConfig`, `FunctionTool`, `AgentHookContext`), preserve existing positional argument meaning. Do not insert new constructor parameters or dataclass fields in the middle of existing public order.
- When adding a new optional public field/parameter, append it to the end whenever possible and keep old fields in the same order.
- If reordering is unavoidable, add an explicit compatibility layer and regression tests that exercise the old positional call pattern.
- Prefer keyword arguments at call sites to reduce accidental breakage, but do not rely on this to justify breaking positional compatibility for public APIs.
## Project Structure Guide
### Overview
The OpenAI Agents Python repository provides the Python Agents SDK, examples, and documentation built with MkDocs. Use `uv run python ...` for Python commands to ensure a consistent environment.
### Repo Structure & Important Files
- `src/agents/`: Core library implementation.
- `tests/`: Test suite; see `tests/README.md` for snapshot guidance.
- `examples/`: Sample projects showing SDK usage.
- `docs/`: MkDocs documentation source; do not edit translated docs under `docs/ja`, `docs/ko`, or `docs/zh` (they are generated).
- `docs/scripts/`: Documentation utilities, including translation and reference generation.
- `mkdocs.yml`: Documentation site configuration.
- `Makefile`: Common developer commands.
- `pyproject.toml`, `uv.lock`: Python dependencies and tool configuration.
- `.github/PULL_REQUEST_TEMPLATE/pull_request_template.md`: Pull request template to use when opening PRs.
- `site/`: Built documentation output.
### Agents Core Runtime Guidelines
- `src/agents/run.py` is the runtime entrypoint (`Runner`, `AgentRunner`). Keep it focused on orchestration and public flow control. Put new runtime logic under `src/agents/run_internal/` and import it into `run.py`.
- When `run.py` grows, refactor helpers into `run_internal/` modules (for example `run_loop.py`, `turn_resolution.py`, `tool_execution.py`, `session_persistence.py`) and leave only wiring and composition in `run.py`.
- Keep streaming and non-streaming paths behaviorally aligned. Changes to `run_internal/run_loop.py` (`run_single_turn`, `run_single_turn_streamed`, `get_new_response`, `start_streaming`) should be mirrored, and any new streaming item types must be reflected in `src/agents/stream_events.py`.
- Input guardrails run only on the first turn and only for the starting agent. Resuming an interruption from `RunState` must not increment the turn counter; only actual model calls advance turns.
- Server-managed conversation (`conversation_id`, `previous_response_id`, `auto_previous_response_id`) uses `OpenAIServerConversationTracker` in `run_internal/oai_conversation.py`. Only deltas should be sent. If `call_model_input_filter` is used, it must return `ModelInputData` with a list input and the tracker must be updated with the filtered input (`mark_input_as_sent`). Session persistence is disabled when server-managed conversation is active.
- Adding new tool/output/approval item types requires coordinated updates across:
- `src/agents/items.py` (RunItem types and conversions)
- `src/agents/run_internal/run_steps.py` (ProcessedResponse and tool run structs)
- `src/agents/run_internal/turn_resolution.py` (model output processing, run item extraction)
- `src/agents/run_internal/tool_execution.py` and `src/agents/run_internal/tool_planning.py`
- `src/agents/run_internal/items.py` (normalization, dedupe, approval filtering)
- `src/agents/stream_events.py` (stream event names)
- `src/agents/run_state.py` (RunState serialization/deserialization)
- `src/agents/run_internal/session_persistence.py` (session save/rewind)
- If the serialized RunState shape changes, update `CURRENT_SCHEMA_VERSION` in `src/agents/run_state.py` and the related serialization/deserialization logic. Keep released schema versions readable, and feel free to renumber or squash unreleased schema versions before release when those intermediate snapshots are intentionally unsupported.
## Operation Guide
### Prerequisites
- Python 3.10+.
- `uv` installed for dependency management (`uv sync`) and `uv run` for Python commands.
- `make` available to run repository tasks.
### Development Workflow
1. Sync with `main` and create a feature branch:
```bash
git checkout main
git pull
git checkout -b feat/<short-description>
```
2. If dependencies changed or you are setting up the repo, run `make sync`.
3. Implement changes and add or update tests alongside code updates.
4. Highlight compatibility or API risks in your plan before implementing changes that alter the latest released behavior or a released or explicitly supported durable external state boundary.
5. Build docs when you touch documentation:
```bash
make build-docs
```
6. When `$code-change-verification` applies, run it to execute the full verification stack before marking work complete.
7. Commit with concise, imperative messages; keep commits small and focused, then open a pull request.
8. When reporting code changes as complete (after substantial code work), invoke `$pr-draft-summary` to generate the required PR summary block with change summary, PR title, and draft description.
### Testing & Automated Checks
Before submitting changes, ensure relevant checks pass and extend tests when you touch code.
When `$code-change-verification` applies, run it to execute the required verification stack from the repository root. Rerun the full stack after applying fixes.
#### Unit tests and type checking
- Run the full test suite:
```bash
make tests
```
- Run a focused test:
```bash
uv run pytest -s -k <pattern>
```
- Type checking:
```bash
make typecheck
```
#### Snapshot tests
Some tests rely on inline snapshots; see `tests/README.md` for details. Re-run `make tests` after updating snapshots.
- Fix snapshots:
```bash
make snapshots-fix
```
- Create new snapshots:
```bash
make snapshots-create
```
#### Coverage
- Generate coverage (fails if coverage drops below threshold):
```bash
make coverage
```
#### Formatting, linting, and type checking
- Formatting and linting use `ruff`; run `make format` (applies fixes) and `make lint` (checks only).
- Type hints must pass `make typecheck`.
- Write comments as full sentences ending with a period.
- Imports are managed by Ruff and should stay sorted.
#### Mandatory local run order
When `$code-change-verification` applies, run the full sequence in order (or use the skill scripts):
```bash
make format
make lint
make typecheck
make tests
```
### Utilities & Tips
- Install or refresh development dependencies:
```bash
make sync
```
- Run tests against the oldest supported version (Python 3.10) in an isolated environment:
```bash
UV_PROJECT_ENVIRONMENT=.venv_310 uv sync --python 3.10 --all-extras --all-packages --group dev
UV_PROJECT_ENVIRONMENT=.venv_310 uv run --python 3.10 -m pytest
```
- Documentation workflows:
```bash
make build-docs # build docs after editing docs
make serve-docs # preview docs locally
make build-full-docs # run translations and build
```
- Snapshot helpers:
```bash
make snapshots-fix
make snapshots-create
```
- Use `examples/` to see common SDK usage patterns.
- Review `Makefile` for common commands and use `uv run` for Python invocations.
- Explore `docs/` and `docs/scripts/` to understand the documentation pipeline.
- Consult `tests/README.md` for test and snapshot workflows.
- Check `mkdocs.yml` to understand how docs are organized.
### Pull Request & Commit Guidelines
- Use the template at `.github/PULL_REQUEST_TEMPLATE/pull_request_template.md`; include a summary, test plan, and issue number if applicable.
- Add tests for new behavior when feasible and update documentation for user-facing changes.
- Run `make format`, `make lint`, `make typecheck`, and `make tests` before marking work ready.
- Commit messages should be concise and written in the imperative mood. Small, focused commits are preferred.
### Review Process & What Reviewers Look For
- ✅ Checks pass (`make format`, `make lint`, `make typecheck`, `make tests`).
- ✅ Tests cover new behavior and edge cases.
- ✅ Code is readable, maintainable, and consistent with existing style.
- ✅ Public APIs and user-facing behavior changes are documented.
- ✅ Examples are updated if behavior changes.
- ✅ History is clean with a clear PR description.
================================================
FILE: CLAUDE.md
================================================
Read the AGENTS.md file for instructions.
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2025 OpenAI
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: Makefile
================================================
# Developer task entry points. Run with `make <target>`; see AGENTS.md for the
# recommended order (format, lint, typecheck, tests).

.PHONY: sync
# Install/refresh all dependencies: every extra, every workspace package, dev group.
sync:
	uv sync --all-extras --all-packages --group dev

.PHONY: format
# Apply formatting and auto-fixable lint fixes in place.
format:
	uv run ruff format
	uv run ruff check --fix

.PHONY: format-check
# Formatting check only; fails without modifying files (used in CI).
format-check:
	uv run ruff format --check

.PHONY: lint
# Lint check only; no fixes applied.
lint:
	uv run ruff check

.PHONY: mypy
mypy:
	uv run mypy . --exclude site

.PHONY: pyright
pyright:
	uv run pyright --project pyrightconfig.json

.PHONY: typecheck
# Run mypy and pyright concurrently. The trap kills the surviving sibling
# process when the recipe exits early (interrupt, or a failing `wait` under
# `set -e`), so a single checker failure does not leave the other running.
typecheck:
	@set -eu; \
	mypy_pid=''; \
	pyright_pid=''; \
	trap 'test -n "$$mypy_pid" && kill $$mypy_pid 2>/dev/null || true; test -n "$$pyright_pid" && kill $$pyright_pid 2>/dev/null || true' EXIT INT TERM; \
	echo "Running make mypy and make pyright in parallel..."; \
	$(MAKE) mypy & mypy_pid=$$!; \
	$(MAKE) pyright & pyright_pid=$$!; \
	wait $$mypy_pid; \
	wait $$pyright_pid; \
	trap - EXIT

.PHONY: tests
# Full suite: parallel-safe tests first, then tests marked `serial`.
tests: tests-parallel tests-serial

.PHONY: tests-asyncio-stability
tests-asyncio-stability:
	bash .github/scripts/run-asyncio-teardown-stability.sh

.PHONY: tests-parallel
# Distribute by file so tests sharing module-level state stay on one worker.
tests-parallel:
	uv run pytest -n auto --dist loadfile -m "not serial"

.PHONY: tests-serial
tests-serial:
	uv run pytest -m serial

.PHONY: coverage
# Collect coverage, emit coverage.xml, and fail below the 85% threshold.
coverage:
	uv run coverage run -m pytest
	uv run coverage xml -o coverage.xml
	uv run coverage report -m --fail-under=85

.PHONY: snapshots-fix
# Update existing inline snapshots to match current output.
snapshots-fix:
	uv run pytest --inline-snapshot=fix

.PHONY: snapshots-create
# Create missing inline snapshots.
snapshots-create:
	uv run pytest --inline-snapshot=create

.PHONY: build-docs
# Regenerate API reference pages, then build the MkDocs site.
build-docs:
	uv run docs/scripts/generate_ref_files.py
	uv run mkdocs build

.PHONY: build-full-docs
# Run the doc translations, then build the MkDocs site.
build-full-docs:
	uv run docs/scripts/translate_docs.py
	uv run mkdocs build

.PHONY: serve-docs
serve-docs:
	uv run mkdocs serve

.PHONY: deploy-docs
deploy-docs:
	uv run mkdocs gh-deploy --force --verbose

.PHONY: check
# Aggregate CI-style gate: formatting, lint, typecheck, and the full test suite.
check: format-check lint typecheck tests
================================================
FILE: PLANS.md
================================================
# Codex Execution Plans (ExecPlans)
This file defines how to write and maintain an ExecPlan: a self-contained, living specification that a novice can follow to deliver observable, working behavior in this repository.
## When to Use an ExecPlan
- Required for multi-step or multi-file work, new features, refactors, or tasks expected to take more than about an hour.
- Optional for trivial fixes (typos, small docs), but if you skip it for a substantial task, state the reason in your response.
## How to Use This File
- Authoring: read this file end to end before drafting; start from the skeleton; embed all context (paths, commands, definitions) so no external docs are needed.
- Implementing: move directly to the next milestone without asking for next steps; keep the living sections current at every stopping point.
- Discussing: record decisions and rationale inside the plan so work can be resumed later using only the ExecPlan.
## Non-Negotiable Requirements
- Self-contained and beginner-friendly: define every term; include needed repo knowledge; avoid assuming prior plans or external links.
- Living document: revise Progress, Surprises & Discoveries, Decision Log, and Outcomes & Retrospective as work proceeds while keeping the plan self-contained.
- Outcome-focused: describe what the user can do after the change and how to see it working; the plan must lead to demonstrably working behavior, not just code edits.
- Explicit acceptance: state behaviors, commands, and observable outputs that prove success.
## Formatting Rules
- Default envelope is a single fenced code block labeled `md`; do not nest other triple backticks inside—indent commands, transcripts, and diffs instead.
- If the file contains only the ExecPlan, omit the enclosing code fence.
- Use blank lines after headings; prefer prose over lists. Checklists are permitted only in the Progress section (and are mandatory there).
## Guidelines
- Define jargon immediately and tie it to concrete files or commands in this repo.
- Anchor on outcomes: acceptance should be phrased as observable behavior; for internal changes, show tests or scenarios that demonstrate the effect.
- Specify repository context explicitly: full paths, functions, modules, working directory for commands, and environment assumptions.
- Be idempotent and safe: describe retries or rollbacks for risky steps; prefer additive, testable changes.
- Validation is required: state exact test commands and expected outputs; include concise evidence (logs, transcripts, diffs) as indented examples.
## Milestones
- Tell a story (goal → work → result → proof) for each milestone; keep them narrative rather than bureaucratic.
- Each milestone must be independently verifiable and incrementally advance the overall goal.
- Milestones are distinct from Progress: milestones explain the plan; Progress tracks real-time execution.
## Living Sections (must be present and maintained)
- Progress: checkbox list with timestamps; every pause should update what is done and what remains.
- Surprises & Discoveries: unexpected behaviors, performance notes, or bugs with brief evidence.
- Decision Log: each decision with rationale and date/author.
- Outcomes & Retrospective: what was achieved, remaining gaps, and lessons learned.
## Prototyping and Parallel Paths
- Prototypes are encouraged to de-risk changes; keep them additive, clearly labeled, and validated.
- Parallel implementations are acceptable when reducing risk; describe how to validate each path and how to retire one safely.
## ExecPlan Skeleton
```md
# <Short, action-oriented description>
This ExecPlan is a living document. The sections Progress, Surprises & Discoveries, Decision Log, and Outcomes & Retrospective must stay up to date as work proceeds.
If PLANS.md is present in the repo, maintain this document in accordance with it and link back to it by path.
## Purpose / Big Picture
Explain the user-visible behavior gained after this change and how to observe it.
## Progress
- [x] (2025-10-01 13:00Z) Example completed step.
- [ ] Example incomplete step.
- [ ] Example partially completed step (completed: X; remaining: Y).
## Surprises & Discoveries
- Observation: …
Evidence: …
## Decision Log
- Decision: …
Rationale: …
Date/Author: …
## Outcomes & Retrospective
Summarize outcomes, gaps, and lessons learned; compare to the original purpose.
## Context and Orientation
Describe the current state relevant to this task as if the reader knows nothing. Name key files and modules by full path; define any non-obvious terms.
## Plan of Work
Prose description of the sequence of edits and additions. For each edit, name the file and location and what to change.
## Concrete Steps
Exact commands to run (with working directory). Include short expected outputs for comparison.
## Validation and Acceptance
Behavioral acceptance criteria plus test commands and expected results.
## Idempotence and Recovery
How to retry or roll back safely; ensure steps can be rerun without harm.
## Artifacts and Notes
Concise transcripts, diffs, or snippets as indented examples.
## Interfaces and Dependencies
Prescribe libraries, modules, and function signatures that must exist at the end. Use stable names and paths.
```
## Revising a Plan
- When the scope shifts, rewrite affected sections so the document remains coherent and self-contained.
- After significant edits, add a short note at the end explaining what changed and why.
================================================
FILE: README.md
================================================
# OpenAI Agents SDK [](https://pypi.org/project/openai-agents/)
The OpenAI Agents SDK is a lightweight yet powerful framework for building multi-agent workflows. It is provider-agnostic, supporting the OpenAI Responses and Chat Completions APIs, as well as 100+ other LLMs.
<img src="https://cdn.openai.com/API/docs/images/orchestration.png" alt="Image of the Agents Tracing UI" style="max-height: 803px;">
> [!NOTE]
> Looking for the JavaScript/TypeScript version? Check out [Agents SDK JS/TS](https://github.com/openai/openai-agents-js).
### Core concepts:
1. [**Agents**](https://openai.github.io/openai-agents-python/agents): LLMs configured with instructions, tools, guardrails, and handoffs
1. **[Agents as tools](https://openai.github.io/openai-agents-python/tools/#agents-as-tools) / [Handoffs](https://openai.github.io/openai-agents-python/handoffs/)**: Delegating to other agents for specific tasks
1. [**Tools**](https://openai.github.io/openai-agents-python/tools/): Various Tools let agents take actions (functions, MCP, hosted tools)
1. [**Guardrails**](https://openai.github.io/openai-agents-python/guardrails/): Configurable safety checks for input and output validation
1. [**Human in the loop**](https://openai.github.io/openai-agents-python/human_in_the_loop/): Built-in mechanisms for involving humans across agent runs
1. [**Sessions**](https://openai.github.io/openai-agents-python/sessions/): Automatic conversation history management across agent runs
1. [**Tracing**](https://openai.github.io/openai-agents-python/tracing/): Built-in tracking of agent runs, allowing you to view, debug and optimize your workflows
1. [**Realtime Agents**](https://openai.github.io/openai-agents-python/realtime/quickstart/): Build powerful voice agents with `gpt-realtime-1.5` and full agent features
Explore the [examples](https://github.com/openai/openai-agents-python/tree/main/examples) directory to see the SDK in action, and read our [documentation](https://openai.github.io/openai-agents-python/) for more details.
## Get started
To get started, set up your Python environment (Python 3.10 or newer required), and then install the OpenAI Agents SDK package.
### venv
```bash
python -m venv .venv
source .venv/bin/activate # On Windows: .venv\Scripts\activate
pip install openai-agents
```
For voice support, install with the optional `voice` group: `pip install 'openai-agents[voice]'`. For Redis session support, install with the optional `redis` group: `pip install 'openai-agents[redis]'`.
### uv
If you're familiar with [uv](https://docs.astral.sh/uv/), installing the package would be even easier:
```bash
uv init
uv add openai-agents
```
For voice support, install with the optional `voice` group: `uv add 'openai-agents[voice]'`. For Redis session support, install with the optional `redis` group: `uv add 'openai-agents[redis]'`.
## Run your first agent
```python
from agents import Agent, Runner
agent = Agent(name="Assistant", instructions="You are a helpful assistant")
result = Runner.run_sync(agent, "Write a haiku about recursion in programming.")
print(result.final_output)
# Code within the code,
# Functions calling themselves,
# Infinite loop's dance.
```
(_If running this, ensure you set the `OPENAI_API_KEY` environment variable_)
(_For Jupyter notebook users, see [hello_world_jupyter.ipynb](https://github.com/openai/openai-agents-python/blob/main/examples/basic/hello_world_jupyter.ipynb)_)
Explore the [examples](https://github.com/openai/openai-agents-python/tree/main/examples) directory to see the SDK in action, and read our [documentation](https://openai.github.io/openai-agents-python/) for more details.
## Acknowledgements
We'd like to acknowledge the excellent work of the open-source community, especially:
- [Pydantic](https://docs.pydantic.dev/latest/) (data validation) and [PydanticAI](https://ai.pydantic.dev/) (advanced agent framework)
- [LiteLLM](https://github.com/BerriAI/litellm) (unified interface for 100+ LLMs)
- [MkDocs](https://github.com/squidfunk/mkdocs-material)
- [Griffe](https://github.com/mkdocstrings/griffe)
- [uv](https://github.com/astral-sh/uv) and [ruff](https://github.com/astral-sh/ruff)
We're committed to continuing to build the Agents SDK as an open source framework so others in the community can expand on our approach.
================================================
FILE: docs/agents.md
================================================
# Agents
Agents are the core building block in your apps. An agent is a large language model (LLM) configured with instructions, tools, and optional runtime behavior such as handoffs, guardrails, and structured outputs.
Use this page when you want to define or customize a single agent. If you are deciding how multiple agents should collaborate, read [Agent orchestration](multi_agent.md).
## Choose the next guide
Use this page as the hub for agent definition. Jump to the adjacent guide that matches the next decision you need to make.
| If you want to... | Read next |
| --- | --- |
| Choose a model or provider setup | [Models](models/index.md) |
| Add capabilities to the agent | [Tools](tools.md) |
| Decide between manager-style orchestration and handoffs | [Agent orchestration](multi_agent.md) |
| Configure handoff behavior | [Handoffs](handoffs.md) |
| Run turns, stream events, or manage conversation state | [Running agents](running_agents.md) |
| Inspect final output, run items, or resumable state | [Results](results.md) |
| Share local dependencies and runtime state | [Context management](context.md) |
## Basic configuration
The most common properties of an agent are:
| Property | Required | Description |
| --- | --- | --- |
| `name` | yes | Human-readable agent name. |
| `instructions` | yes | System prompt or dynamic instructions callback. See [Dynamic instructions](#dynamic-instructions). |
| `prompt` | no | OpenAI Responses API prompt configuration. Accepts a static prompt object or a function. See [Prompt templates](#prompt-templates). |
| `handoff_description` | no | Short description exposed when this agent is offered as a handoff target. |
| `handoffs` | no | Delegate the conversation to specialist agents. See [handoffs](handoffs.md). |
| `model` | no | Which LLM to use. See [Models](models/index.md). |
| `model_settings` | no | Model tuning parameters such as `temperature`, `top_p`, and `tool_choice`. |
| `tools` | no | Tools the agent can call. See [Tools](tools.md). |
| `mcp_servers` | no | MCP-backed tools for the agent. See the [MCP guide](mcp.md). |
| `mcp_config` | no | Fine-tune how MCP tools are prepared, such as strict schema conversion and MCP failure formatting. See the [MCP guide](mcp.md#agent-level-mcp-configuration). |
| `input_guardrails` | no | Guardrails that run on the first user input for this agent chain. See [Guardrails](guardrails.md). |
| `output_guardrails` | no | Guardrails that run on the final output for this agent. See [Guardrails](guardrails.md). |
| `output_type` | no | Structured output type instead of plain text. See [Output types](#output-types). |
| `hooks` | no | Agent-scoped lifecycle callbacks. See [Lifecycle events (hooks)](#lifecycle-events-hooks). |
| `tool_use_behavior` | no | Control whether tool results loop back to the model or end the run. See [Tool use behavior](#tool-use-behavior). |
| `reset_tool_choice` | no | Reset `tool_choice` after a tool call (default: `True`) to avoid tool-use loops. See [Forcing tool use](#forcing-tool-use). |
```python
from agents import Agent, ModelSettings, function_tool
@function_tool
def get_weather(city: str) -> str:
"""returns weather info for the specified city."""
return f"The weather in {city} is sunny"
agent = Agent(
name="Haiku agent",
instructions="Always respond in haiku form",
model="gpt-5-nano",
tools=[get_weather],
)
```
## Prompt templates
You can reference a prompt template created in the OpenAI platform by setting `prompt`. This works with OpenAI models using the Responses API.
To use it, please:
1. Go to https://platform.openai.com/playground/prompts
2. Create a new prompt variable, `poem_style`.
3. Create a system prompt with the content:
```
Write a poem in {{poem_style}}
```
4. Run the example with the `--prompt-id` flag.
```python
from agents import Agent
agent = Agent(
name="Prompted assistant",
prompt={
"id": "pmpt_123",
"version": "1",
"variables": {"poem_style": "haiku"},
},
)
```
You can also generate the prompt dynamically at run time:
```python
from dataclasses import dataclass
from agents import Agent, GenerateDynamicPromptData, Runner
@dataclass
class PromptContext:
prompt_id: str
poem_style: str
async def build_prompt(data: GenerateDynamicPromptData):
ctx: PromptContext = data.context.context
return {
"id": ctx.prompt_id,
"version": "1",
"variables": {"poem_style": ctx.poem_style},
}
agent = Agent(name="Prompted assistant", prompt=build_prompt)
result = await Runner.run(
agent,
"Say hello",
context=PromptContext(prompt_id="pmpt_123", poem_style="limerick"),
)
```
## Context
Agents are generic on their `context` type. Context is a dependency-injection tool: it's an object you create and pass to `Runner.run()`; it is then passed to every agent, tool, handoff, etc., and serves as a grab bag of dependencies and state for the agent run. You can provide any Python object as the context.
Read the [context guide](context.md) for the full `RunContextWrapper` surface, shared usage tracking, nested `tool_input`, and serialization caveats.
```python
@dataclass
class UserContext:
name: str
uid: str
is_pro_user: bool
async def fetch_purchases() -> list[Purchase]:
return ...
agent = Agent[UserContext](
...,
)
```
## Output types
By default, agents produce plain text (i.e. `str`) outputs. If you want the agent to produce a particular type of output, you can use the `output_type` parameter. A common choice is to use [Pydantic](https://docs.pydantic.dev/) objects, but we support any type that can be wrapped in a Pydantic [TypeAdapter](https://docs.pydantic.dev/latest/api/type_adapter/) - dataclasses, lists, TypedDict, etc.
```python
from pydantic import BaseModel
from agents import Agent
class CalendarEvent(BaseModel):
name: str
date: str
participants: list[str]
agent = Agent(
name="Calendar extractor",
instructions="Extract calendar events from text",
output_type=CalendarEvent,
)
```
!!! note
When you pass an `output_type`, that tells the model to use [structured outputs](https://platform.openai.com/docs/guides/structured-outputs) instead of regular plain text responses.
## Multi-agent system design patterns
There are many ways to design multi‑agent systems, but we commonly see two broadly applicable patterns:
1. Manager (agents as tools): A central manager/orchestrator invokes specialized sub‑agents as tools and retains control of the conversation.
2. Handoffs: Peer agents hand off control to a specialized agent that takes over the conversation. This is decentralized.
See [our practical guide to building agents](https://cdn.openai.com/business-guides-and-resources/a-practical-guide-to-building-agents.pdf) for more details.
### Manager (agents as tools)
The `customer_facing_agent` handles all user interaction and invokes specialized sub‑agents exposed as tools. Read more in the [tools](tools.md#agents-as-tools) documentation.
```python
from agents import Agent
booking_agent = Agent(...)
refund_agent = Agent(...)
customer_facing_agent = Agent(
name="Customer-facing agent",
instructions=(
"Handle all direct user communication. "
"Call the relevant tools when specialized expertise is needed."
),
tools=[
booking_agent.as_tool(
tool_name="booking_expert",
tool_description="Handles booking questions and requests.",
),
refund_agent.as_tool(
tool_name="refund_expert",
tool_description="Handles refund questions and requests.",
)
],
)
```
### Handoffs
Handoffs are sub‑agents the agent can delegate to. When a handoff occurs, the delegated agent receives the conversation history and takes over the conversation. This pattern enables modular, specialized agents that excel at a single task. Read more in the [handoffs](handoffs.md) documentation.
```python
from agents import Agent
booking_agent = Agent(...)
refund_agent = Agent(...)
triage_agent = Agent(
name="Triage agent",
instructions=(
"Help the user with their questions. "
"If they ask about booking, hand off to the booking agent. "
"If they ask about refunds, hand off to the refund agent."
),
handoffs=[booking_agent, refund_agent],
)
```
## Dynamic instructions
In most cases, you can provide instructions when you create the agent. However, you can also provide dynamic instructions via a function. The function will receive the agent and context, and must return the prompt. Both regular and `async` functions are accepted.
```python
def dynamic_instructions(
context: RunContextWrapper[UserContext], agent: Agent[UserContext]
) -> str:
return f"The user's name is {context.context.name}. Help them with their questions."
agent = Agent[UserContext](
name="Triage agent",
instructions=dynamic_instructions,
)
```
## Lifecycle events (hooks)
Sometimes, you want to observe the lifecycle of an agent. For example, you may want to log events, pre-fetch data, or record usage when certain events occur.
There are two hook scopes:
- [`RunHooks`][agents.lifecycle.RunHooks] observe the entire `Runner.run(...)` invocation, including handoffs to other agents.
- [`AgentHooks`][agents.lifecycle.AgentHooks] are attached to a specific agent instance via `agent.hooks`.
The callback context also changes depending on the event:
- Agent start/end hooks receive [`AgentHookContext`][agents.run_context.AgentHookContext], which wraps your original context and carries the shared run usage state.
- LLM, tool, and handoff hooks receive [`RunContextWrapper`][agents.run_context.RunContextWrapper].
Typical hook timing:
- `on_agent_start` / `on_agent_end`: when a specific agent begins or finishes producing a final output.
- `on_llm_start` / `on_llm_end`: immediately around each model call.
- `on_tool_start` / `on_tool_end`: around each local tool invocation.
- `on_handoff`: when control moves from one agent to another.
Use `RunHooks` when you want a single observer for the whole workflow, and `AgentHooks` when one agent needs custom side effects.
```python
from agents import Agent, RunHooks, Runner
class LoggingHooks(RunHooks):
async def on_agent_start(self, context, agent):
print(f"Starting {agent.name}")
async def on_llm_end(self, context, agent, response):
print(f"{agent.name} produced {len(response.output)} output items")
async def on_agent_end(self, context, agent, output):
print(f"{agent.name} finished with usage: {context.usage}")
agent = Agent(name="Assistant", instructions="Be concise.")
result = await Runner.run(agent, "Explain quines", hooks=LoggingHooks())
print(result.final_output)
```
For the full callback surface, see the [Lifecycle API reference](ref/lifecycle.md).
## Guardrails
Guardrails allow you to run checks/validations on user input in parallel to the agent running, and on the agent's output once it is produced. For example, you could screen the user's input and agent's output for relevance. Read more in the [guardrails](guardrails.md) documentation.
## Cloning/copying agents
By using the `clone()` method on an agent, you can duplicate an Agent, and optionally change any properties you like.
```python
pirate_agent = Agent(
name="Pirate",
instructions="Write like a pirate",
model="gpt-5.4",
)
robot_agent = pirate_agent.clone(
name="Robot",
instructions="Write like a robot",
)
```
## Forcing tool use
Supplying a list of tools doesn't always mean the LLM will use a tool. You can force tool use by setting [`ModelSettings.tool_choice`][agents.model_settings.ModelSettings.tool_choice]. Valid values are:
1. `auto`, which allows the LLM to decide whether or not to use a tool.
2. `required`, which requires the LLM to use a tool (but it can intelligently decide which tool).
3. `none`, which requires the LLM to _not_ use a tool.
4. Setting a specific string e.g. `my_tool`, which requires the LLM to use that specific tool.
When you are using OpenAI Responses tool search, named tool choices are more limited: you cannot target bare namespace names or deferred-only tools with `tool_choice`, and `tool_choice="tool_search"` does not target [`ToolSearchTool`][agents.tool.ToolSearchTool]. In those cases, prefer `auto` or `required`. See [Hosted tool search](tools.md#hosted-tool-search) for the Responses-specific constraints.
```python
from agents import Agent, Runner, function_tool, ModelSettings
@function_tool
def get_weather(city: str) -> str:
"""Returns weather info for the specified city."""
return f"The weather in {city} is sunny"
agent = Agent(
name="Weather Agent",
instructions="Retrieve weather details.",
tools=[get_weather],
model_settings=ModelSettings(tool_choice="get_weather")
)
```
## Tool use behavior
The `tool_use_behavior` parameter in the `Agent` configuration controls how tool outputs are handled:
- `"run_llm_again"`: The default. Tools are run, and the LLM processes the results to produce a final response.
- `"stop_on_first_tool"`: The output of the first tool call is used as the final response, without further LLM processing.
```python
from agents import Agent, Runner, function_tool, ModelSettings
@function_tool
def get_weather(city: str) -> str:
"""Returns weather info for the specified city."""
return f"The weather in {city} is sunny"
agent = Agent(
name="Weather Agent",
instructions="Retrieve weather details.",
tools=[get_weather],
tool_use_behavior="stop_on_first_tool"
)
```
- `StopAtTools(stop_at_tool_names=[...])`: Stops if any specified tool is called, using its output as the final response.
```python
from agents import Agent, Runner, function_tool
from agents.agent import StopAtTools
@function_tool
def get_weather(city: str) -> str:
"""Returns weather info for the specified city."""
return f"The weather in {city} is sunny"
@function_tool
def sum_numbers(a: int, b: int) -> int:
"""Adds two numbers."""
return a + b
agent = Agent(
name="Stop At Stock Agent",
instructions="Get weather or sum numbers.",
tools=[get_weather, sum_numbers],
tool_use_behavior=StopAtTools(stop_at_tool_names=["get_weather"])
)
```
- `ToolsToFinalOutputFunction`: A custom function that processes tool results and decides whether to stop or continue with the LLM.
```python
from agents import Agent, Runner, function_tool, FunctionToolResult, RunContextWrapper
from agents.agent import ToolsToFinalOutputResult
from typing import List, Any
@function_tool
def get_weather(city: str) -> str:
"""Returns weather info for the specified city."""
return f"The weather in {city} is sunny"
def custom_tool_handler(
context: RunContextWrapper[Any],
tool_results: List[FunctionToolResult]
) -> ToolsToFinalOutputResult:
"""Processes tool results to decide final output."""
for result in tool_results:
if result.output and "sunny" in result.output:
return ToolsToFinalOutputResult(
is_final_output=True,
final_output=f"Final weather: {result.output}"
)
return ToolsToFinalOutputResult(
is_final_output=False,
final_output=None
)
agent = Agent(
name="Weather Agent",
instructions="Retrieve weather details.",
tools=[get_weather],
tool_use_behavior=custom_tool_handler
)
```
!!! note
To prevent infinite loops, the framework automatically resets `tool_choice` to "auto" after a tool call. This behavior is configurable via [`agent.reset_tool_choice`][agents.agent.Agent.reset_tool_choice]. The infinite loop is because tool results are sent to the LLM, which then generates another tool call because of `tool_choice`, ad infinitum.
================================================
FILE: docs/config.md
================================================
# Configuration
This page covers SDK-wide defaults that you usually set once during application startup, such as the default OpenAI key or client, the default OpenAI API shape, tracing export defaults, and logging behavior.
If you need to configure a specific agent or run instead, start with:
- [Running agents](running_agents.md) for `RunConfig`, sessions, and conversation-state options.
- [Models](models/index.md) for model selection and provider configuration.
- [Tracing](tracing.md) for per-run tracing metadata and custom trace processors.
## API keys and clients
By default, the SDK uses the `OPENAI_API_KEY` environment variable for LLM requests and tracing. The key is resolved when the SDK first creates an OpenAI client (lazy initialization), so set the environment variable before your first model call. If you are unable to set that environment variable before your app starts, you can use the [set_default_openai_key()][agents.set_default_openai_key] function to set the key.
```python
from agents import set_default_openai_key
set_default_openai_key("sk-...")
```
Alternatively, you can also configure an OpenAI client to be used. By default, the SDK creates an `AsyncOpenAI` instance, using the API key from the environment variable or the default key set above. You can change this by using the [set_default_openai_client()][agents.set_default_openai_client] function.
```python
from openai import AsyncOpenAI
from agents import set_default_openai_client
custom_client = AsyncOpenAI(base_url="...", api_key="...")
set_default_openai_client(custom_client)
```
Finally, you can also customize the OpenAI API that is used. By default, we use the OpenAI Responses API. You can override this to use the Chat Completions API by using the [set_default_openai_api()][agents.set_default_openai_api] function.
```python
from agents import set_default_openai_api
set_default_openai_api("chat_completions")
```
## Tracing
Tracing is enabled by default. By default it uses the same OpenAI API key as your model requests from the section above (that is, the environment variable or the default key you set). You can specifically set the API key used for tracing by using the [`set_tracing_export_api_key`][agents.set_tracing_export_api_key] function.
```python
from agents import set_tracing_export_api_key
set_tracing_export_api_key("sk-...")
```
If you need to attribute traces to a specific organization or project when using the default exporter, set these environment variables before your app starts:
```bash
export OPENAI_ORG_ID="org_..."
export OPENAI_PROJECT_ID="proj_..."
```
You can also set a tracing API key per run without changing the global exporter.
```python
from agents import Runner, RunConfig
await Runner.run(
agent,
input="Hello",
run_config=RunConfig(tracing={"api_key": "sk-tracing-123"}),
)
```
You can also disable tracing entirely by using the [`set_tracing_disabled()`][agents.set_tracing_disabled] function.
```python
from agents import set_tracing_disabled
set_tracing_disabled(True)
```
If you want to keep tracing enabled but exclude potentially sensitive inputs/outputs from trace payloads, set [`RunConfig.trace_include_sensitive_data`][agents.run.RunConfig.trace_include_sensitive_data] to `False`:
```python
from agents import Runner, RunConfig
await Runner.run(
agent,
input="Hello",
run_config=RunConfig(trace_include_sensitive_data=False),
)
```
You can also change the default without code by setting this environment variable before your app starts:
```bash
export OPENAI_AGENTS_TRACE_INCLUDE_SENSITIVE_DATA=0
```
For full tracing controls, see the [tracing guide](tracing.md).
## Debug logging
The SDK defines two Python loggers (`openai.agents` and `openai.agents.tracing`) and does not attach handlers by default. Logs follow your application's Python logging configuration.
To enable verbose logging, use the [`enable_verbose_stdout_logging()`][agents.enable_verbose_stdout_logging] function.
```python
from agents import enable_verbose_stdout_logging
enable_verbose_stdout_logging()
```
Alternatively, you can customize the logs by adding handlers, filters, formatters, etc. You can read more in the [Python logging guide](https://docs.python.org/3/howto/logging.html).
```python
import logging
logger = logging.getLogger("openai.agents") # or openai.agents.tracing for the Tracing logger
# To make all logs show up
logger.setLevel(logging.DEBUG)
# To make info and above show up
logger.setLevel(logging.INFO)
# To make warning and above show up
logger.setLevel(logging.WARNING)
# etc
# You can customize this as needed, but this will output to `stderr` by default
logger.addHandler(logging.StreamHandler())
```
### Sensitive data in logs
Certain logs may contain sensitive data (for example, user data).
By default, the SDK does **not** log LLM inputs/outputs or tool inputs/outputs. These protections are controlled by:
```bash
OPENAI_AGENTS_DONT_LOG_MODEL_DATA=1
OPENAI_AGENTS_DONT_LOG_TOOL_DATA=1
```
If you need to include this data temporarily for debugging, set either variable to `0` (or `false`) before your app starts:
```bash
export OPENAI_AGENTS_DONT_LOG_MODEL_DATA=0
export OPENAI_AGENTS_DONT_LOG_TOOL_DATA=0
```
================================================
FILE: docs/context.md
================================================
# Context management
Context is an overloaded term. There are two main classes of context you might care about:
1. Context available locally to your code: this is data and dependencies you might need when tool functions run, during callbacks like `on_handoff`, in lifecycle hooks, etc.
2. Context available to LLMs: this is data the LLM sees when generating a response.
## Local context
This is represented via the [`RunContextWrapper`][agents.run_context.RunContextWrapper] class and the [`context`][agents.run_context.RunContextWrapper.context] property within it. The way this works is:
1. You create any Python object you want. A common pattern is to use a dataclass or a Pydantic object.
2. You pass that object to the various run methods (e.g. `Runner.run(..., context=whatever)`).
3. All your tool calls, lifecycle hooks etc will be passed a wrapper object, `RunContextWrapper[T]`, where `T` represents your context object type which you can access via `wrapper.context`.
The **most important** thing to be aware of: every agent, tool function, lifecycle hook, etc. for a given agent run must use the same _type_ of context.
You can use the context for things like:
- Contextual data for your run (e.g. things like a username/uid or other information about the user)
- Dependencies (e.g. logger objects, data fetchers, etc)
- Helper functions
!!! danger "Note"
The context object is **not** sent to the LLM. It is purely a local object that you can read from, write to, and call methods on.
Within a single run, derived wrappers share the same underlying app context, approval state, and usage tracking. Nested [`Agent.as_tool()`][agents.agent.Agent.as_tool] runs may attach a different `tool_input`, but they do not get an isolated copy of your app state by default.
### What `RunContextWrapper` exposes
[`RunContextWrapper`][agents.run_context.RunContextWrapper] is a wrapper around your app-defined context object. In practice you will most often use:
- [`wrapper.context`][agents.run_context.RunContextWrapper.context] for your own mutable app state and dependencies.
- [`wrapper.usage`][agents.run_context.RunContextWrapper.usage] for aggregated request and token usage across the current run.
- [`wrapper.tool_input`][agents.run_context.RunContextWrapper.tool_input] for structured input when the current run is executing inside [`Agent.as_tool()`][agents.agent.Agent.as_tool].
- [`wrapper.approve_tool(...)`][agents.run_context.RunContextWrapper.approve_tool] / [`wrapper.reject_tool(...)`][agents.run_context.RunContextWrapper.reject_tool] when you need to update approval state programmatically.
Only `wrapper.context` is your app-defined object. The other fields are runtime metadata managed by the SDK.
If you later serialize a [`RunState`][agents.run_state.RunState] for human-in-the-loop or durable job workflows, that runtime metadata is saved with the state. Avoid putting secrets in [`RunContextWrapper.context`][agents.run_context.RunContextWrapper.context] if you intend to persist or transmit serialized state.
Conversation state is a separate concern. Use `result.to_input_list()`, `session`, `conversation_id`, or `previous_response_id` depending on how you want to carry turns forward. See [results](results.md), [running agents](running_agents.md), and [sessions](sessions/index.md) for that decision.
```python
import asyncio
from dataclasses import dataclass
from agents import Agent, RunContextWrapper, Runner, function_tool
@dataclass
class UserInfo: # (1)!
name: str
uid: int
@function_tool
async def fetch_user_age(wrapper: RunContextWrapper[UserInfo]) -> str: # (2)!
"""Fetch the age of the user. Call this function to get user's age information."""
return f"The user {wrapper.context.name} is 47 years old"
async def main():
user_info = UserInfo(name="John", uid=123)
agent = Agent[UserInfo]( # (3)!
name="Assistant",
tools=[fetch_user_age],
)
result = await Runner.run( # (4)!
starting_agent=agent,
input="What is the age of the user?",
context=user_info,
)
print(result.final_output) # (5)!
# The user John is 47 years old.
if __name__ == "__main__":
asyncio.run(main())
```
1. This is the context object. We've used a dataclass here, but you can use any type.
2. This is a tool. You can see it takes a `RunContextWrapper[UserInfo]`. The tool implementation reads from the context.
3. We mark the agent with the generic `UserInfo`, so that the typechecker can catch errors (for example, if we tried to pass a tool that took a different context type).
4. The context is passed to the `run` function.
5. The agent correctly calls the tool and gets the age.
---
### Advanced: `ToolContext`
In some cases, you might want to access extra metadata about the tool being executed — such as its name, call ID, or raw argument string.
For this, you can use the [`ToolContext`][agents.tool_context.ToolContext] class, which extends `RunContextWrapper`.
```python
from typing import Annotated
from pydantic import BaseModel, Field
from agents import Agent, Runner, function_tool
from agents.tool_context import ToolContext
class WeatherContext(BaseModel):
user_id: str
class Weather(BaseModel):
city: str = Field(description="The city name")
temperature_range: str = Field(description="The temperature range in Celsius")
conditions: str = Field(description="The weather conditions")
@function_tool
def get_weather(ctx: ToolContext[WeatherContext], city: Annotated[str, "The city to get the weather for"]) -> Weather:
print(f"[debug] Tool context: (name: {ctx.tool_name}, call_id: {ctx.tool_call_id}, args: {ctx.tool_arguments})")
return Weather(city=city, temperature_range="14-20C", conditions="Sunny with wind.")
agent = Agent(
name="Weather Agent",
instructions="You are a helpful agent that can tell the weather of a given city.",
tools=[get_weather],
)
```
`ToolContext` provides the same `.context` property as `RunContextWrapper`,
plus additional fields specific to the current tool call:
- `tool_name` – the name of the tool being invoked
- `tool_call_id` – a unique identifier for this tool call
- `tool_arguments` – the raw argument string passed to the tool
- `tool_namespace` – the Responses namespace for the tool call, when the tool was loaded through `tool_namespace()` or another namespaced surface
- `qualified_tool_name` – the tool name qualified with the namespace when one is available
Use `ToolContext` when you need tool-level metadata during execution.
For general context sharing between agents and tools, `RunContextWrapper` remains sufficient. Because `ToolContext` extends `RunContextWrapper`, it can also expose `.tool_input` when a nested `Agent.as_tool()` run supplied structured input.
---
## Agent/LLM context
When an LLM is called, the **only** data it can see is from the conversation history. This means that if you want to make some new data available to the LLM, you must do it in a way that makes it available in that history. There are a few ways to do this:
1. You can add it to the Agent `instructions`. This is also known as a "system prompt" or "developer message". System prompts can be static strings, or they can be dynamic functions that receive the context and output a string. This is a common tactic for information that is always useful (for example, the user's name or the current date).
2. Add it to the `input` when calling the `Runner.run` functions. This is similar to the `instructions` tactic, but allows you to have messages that are lower in the [chain of command](https://cdn.openai.com/spec/model-spec-2024-05-08.html#follow-the-chain-of-command).
3. Expose it via function tools. This is useful for _on-demand_ context - the LLM decides when it needs some data, and can call the tool to fetch that data.
4. Use retrieval or web search. These are special tools that are able to fetch relevant data from files or databases (retrieval), or from the web (web search). This is useful for "grounding" the response in relevant contextual data.
================================================
FILE: docs/examples.md
================================================
# Examples
Check out a variety of sample implementations of the SDK in the examples section of the [repo](https://github.com/openai/openai-agents-python/tree/main/examples). The examples are organized into several categories that demonstrate different patterns and capabilities.
## Categories
- **[agent_patterns](https://github.com/openai/openai-agents-python/tree/main/examples/agent_patterns):**
Examples in this category illustrate common agent design patterns, such as
- Deterministic workflows
- Agents as tools
- Parallel agent execution
- Conditional tool usage
- Input/output guardrails
- LLM as a judge
- Routing
- Streaming guardrails
- Custom rejection messages for approval flows (`examples/agent_patterns/human_in_the_loop_custom_rejection.py`)
- **[basic](https://github.com/openai/openai-agents-python/tree/main/examples/basic):**
These examples showcase foundational capabilities of the SDK, such as
- Hello world examples (Default model, GPT-5, open-weight model)
- Agent lifecycle management
- Dynamic system prompts
- Streaming outputs (text, items, function call args)
- Responses websocket transport with a shared session helper across turns (`examples/basic/stream_ws.py`)
- Prompt templates
- File handling (local and remote, images and PDFs)
- Usage tracking
- Runner-managed retry settings (`examples/basic/retry.py`)
- Runner-managed retries with LiteLLM (`examples/basic/retry_litellm.py`)
- Non-strict output types
- Previous response ID usage
- **[customer_service](https://github.com/openai/openai-agents-python/tree/main/examples/customer_service):**
Example customer service system for an airline.
- **[financial_research_agent](https://github.com/openai/openai-agents-python/tree/main/examples/financial_research_agent):**
A financial research agent that demonstrates structured research workflows with agents and tools for financial data analysis.
- **[handoffs](https://github.com/openai/openai-agents-python/tree/main/examples/handoffs):**
See practical examples of agent handoffs with message filtering.
- **[hosted_mcp](https://github.com/openai/openai-agents-python/tree/main/examples/hosted_mcp):**
Examples demonstrating how to use hosted MCP (Model Context Protocol) connectors and approvals.
- **[mcp](https://github.com/openai/openai-agents-python/tree/main/examples/mcp):**
Learn how to build agents with MCP (Model Context Protocol), including:
- Filesystem examples
- Git examples
- MCP prompt server examples
- SSE (Server-Sent Events) examples
- Streamable HTTP examples
- **[memory](https://github.com/openai/openai-agents-python/tree/main/examples/memory):**
Examples of different memory implementations for agents, including:
- SQLite session storage
- Advanced SQLite session storage
- Redis session storage
- SQLAlchemy session storage
- Dapr state store session storage
- Encrypted session storage
- OpenAI Conversations session storage
- Responses compaction session storage
- Stateless Responses compaction with `ModelSettings(store=False)` (`examples/memory/compaction_session_stateless_example.py`)
- **[model_providers](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers):**
Explore how to use non-OpenAI models with the SDK, including custom providers and LiteLLM integration.
- **[realtime](https://github.com/openai/openai-agents-python/tree/main/examples/realtime):**
Examples showing how to build real-time experiences using the SDK, including:
- Web application patterns with structured text and image messages
- Command-line audio loops and playback handling
- Twilio Media Streams integration over WebSocket
- Twilio SIP integration using Realtime Calls API attach flows
- **[reasoning_content](https://github.com/openai/openai-agents-python/tree/main/examples/reasoning_content):**
Examples demonstrating how to work with reasoning content and structured outputs.
- **[research_bot](https://github.com/openai/openai-agents-python/tree/main/examples/research_bot):**
Simple deep research clone that demonstrates complex multi-agent research workflows.
- **[tools](https://github.com/openai/openai-agents-python/tree/main/examples/tools):**
Learn how to implement OAI hosted tools and experimental Codex tooling such as:
- Web search and web search with filters
- File search
- Code interpreter
- Hosted container shell with inline skills (`examples/tools/container_shell_inline_skill.py`)
- Hosted container shell with skill references (`examples/tools/container_shell_skill_reference.py`)
- Local shell with local skills (`examples/tools/local_shell_skill.py`)
- Tool search with namespaces and deferred tools (`examples/tools/tool_search.py`)
- Computer use
- Image generation
- Experimental Codex tool workflows (`examples/tools/codex.py`)
- Experimental Codex same-thread workflows (`examples/tools/codex_same_thread.py`)
- **[voice](https://github.com/openai/openai-agents-python/tree/main/examples/voice):**
See examples of voice agents, using our TTS and STT models, including streamed voice examples.
================================================
FILE: docs/guardrails.md
================================================
# Guardrails
Guardrails enable you to do checks and validations of user input and agent output. For example, imagine you have an agent that uses a very smart (and hence slow/expensive) model to help with customer requests. You wouldn't want malicious users to ask the model to help them with their math homework. So, you can run a guardrail with a fast/cheap model. If the guardrail detects malicious usage, it can immediately raise an error and prevent the expensive model from running, saving you time and money (**when using blocking guardrails; for parallel guardrails, the expensive model may have already started running before the guardrail completes. See "Execution modes" below for details**).
There are two kinds of guardrails:
1. Input guardrails run on the initial user input
2. Output guardrails run on the final agent output
## Workflow boundaries
Guardrails are attached to agents and tools, but they do not all run at the same points in a workflow:
- **Input guardrails** run only for the first agent in the chain.
- **Output guardrails** run only for the agent that produces the final output.
- **Tool guardrails** run on every custom function-tool invocation, with input guardrails before execution and output guardrails after execution.
If you need checks around each custom function-tool call in a workflow that includes managers, handoffs, or delegated specialists, use tool guardrails instead of relying only on agent-level input/output guardrails.
## Input guardrails
Input guardrails run in 3 steps:
1. First, the guardrail receives the same input passed to the agent.
2. Next, the guardrail function runs to produce a [`GuardrailFunctionOutput`][agents.guardrail.GuardrailFunctionOutput], which is then wrapped in an [`InputGuardrailResult`][agents.guardrail.InputGuardrailResult]
3. Finally, we check if [`.tripwire_triggered`][agents.guardrail.GuardrailFunctionOutput.tripwire_triggered] is true. If true, an [`InputGuardrailTripwireTriggered`][agents.exceptions.InputGuardrailTripwireTriggered] exception is raised, so you can appropriately respond to the user or handle the exception.
!!! Note
Input guardrails are intended to run on user input, so an agent's guardrails only run if the agent is the *first* agent. You might wonder, why is the `guardrails` property on the agent instead of passed to `Runner.run`? It's because guardrails tend to be related to the actual Agent - you'd run different guardrails for different agents, so colocating the code is useful for readability.
### Execution modes
Input guardrails support two execution modes:
- **Parallel execution** (default, `run_in_parallel=True`): The guardrail runs concurrently with the agent's execution. This provides the best latency since both start at the same time. However, if the guardrail fails, the agent may have already consumed tokens and executed tools before being cancelled.
- **Blocking execution** (`run_in_parallel=False`): The guardrail runs and completes *before* the agent starts. If the guardrail tripwire is triggered, the agent never executes, preventing token consumption and tool execution. This is ideal for cost optimization and when you want to avoid potential side effects from tool calls.
## Output guardrails
Output guardrails run in 3 steps:
1. First, the guardrail receives the output produced by the agent.
2. Next, the guardrail function runs to produce a [`GuardrailFunctionOutput`][agents.guardrail.GuardrailFunctionOutput], which is then wrapped in an [`OutputGuardrailResult`][agents.guardrail.OutputGuardrailResult]
3. Finally, we check if [`.tripwire_triggered`][agents.guardrail.GuardrailFunctionOutput.tripwire_triggered] is true. If true, an [`OutputGuardrailTripwireTriggered`][agents.exceptions.OutputGuardrailTripwireTriggered] exception is raised, so you can appropriately respond to the user or handle the exception.
!!! Note
Output guardrails are intended to run on the final agent output, so an agent's guardrails only run if the agent is the *last* agent. Similar to the input guardrails, we do this because guardrails tend to be related to the actual Agent - you'd run different guardrails for different agents, so colocating the code is useful for readability.
Output guardrails always run after the agent completes, so they don't support the `run_in_parallel` parameter.
## Tool guardrails
Tool guardrails wrap **function tools** and let you validate or block tool calls before and after execution. They are configured on the tool itself and run every time that tool is invoked.
- Input tool guardrails run before the tool executes and can skip the call, replace the output with a message, or raise a tripwire.
- Output tool guardrails run after the tool executes and can replace the output or raise a tripwire.
- Tool guardrails apply only to function tools created with [`function_tool`][agents.tool.function_tool]. Handoffs run through the SDK's handoff pipeline rather than the normal function-tool pipeline, so tool guardrails do not apply to the handoff call itself. Hosted tools (`WebSearchTool`, `FileSearchTool`, `HostedMCPTool`, `CodeInterpreterTool`, `ImageGenerationTool`) and built-in execution tools (`ComputerTool`, `ShellTool`, `ApplyPatchTool`, `LocalShellTool`) also do not use this guardrail pipeline, and [`Agent.as_tool()`][agents.agent.Agent.as_tool] does not currently expose tool-guardrail options directly.
See the code snippet below for details.
## Tripwires
If the input or output fails the guardrail, the guardrail can signal this with a tripwire. As soon as we see a guardrail whose tripwire has been triggered, we immediately raise a `{Input,Output}GuardrailTripwireTriggered` exception and halt the agent execution.
## Implementing a guardrail
You need to provide a function that receives input, and returns a [`GuardrailFunctionOutput`][agents.guardrail.GuardrailFunctionOutput]. In this example, we'll do this by running an Agent under the hood.
```python
from pydantic import BaseModel
from agents import (
Agent,
GuardrailFunctionOutput,
InputGuardrailTripwireTriggered,
RunContextWrapper,
Runner,
TResponseInputItem,
input_guardrail,
)
class MathHomeworkOutput(BaseModel):
is_math_homework: bool
reasoning: str
guardrail_agent = Agent( # (1)!
name="Guardrail check",
instructions="Check if the user is asking you to do their math homework.",
output_type=MathHomeworkOutput,
)
@input_guardrail
async def math_guardrail( # (2)!
ctx: RunContextWrapper[None], agent: Agent, input: str | list[TResponseInputItem]
) -> GuardrailFunctionOutput:
result = await Runner.run(guardrail_agent, input, context=ctx.context)
return GuardrailFunctionOutput(
output_info=result.final_output, # (3)!
tripwire_triggered=result.final_output.is_math_homework,
)
agent = Agent( # (4)!
name="Customer support agent",
instructions="You are a customer support agent. You help customers with their questions.",
input_guardrails=[math_guardrail],
)
async def main():
# This should trip the guardrail
try:
await Runner.run(agent, "Hello, can you help me solve for x: 2x + 3 = 11?")
print("Guardrail didn't trip - this is unexpected")
except InputGuardrailTripwireTriggered:
print("Math homework guardrail tripped")
```
1. We'll use this agent in our guardrail function.
2. This is the guardrail function that receives the agent's input/context, and returns the result.
3. We can include extra information in the guardrail result.
4. This is the actual agent that defines the workflow.
Output guardrails are similar.
```python
from pydantic import BaseModel
from agents import (
Agent,
GuardrailFunctionOutput,
OutputGuardrailTripwireTriggered,
RunContextWrapper,
Runner,
output_guardrail,
)
class MessageOutput(BaseModel): # (1)!
response: str
class MathOutput(BaseModel): # (2)!
reasoning: str
is_math: bool
guardrail_agent = Agent(
name="Guardrail check",
instructions="Check if the output includes any math.",
output_type=MathOutput,
)
@output_guardrail
async def math_guardrail( # (3)!
ctx: RunContextWrapper, agent: Agent, output: MessageOutput
) -> GuardrailFunctionOutput:
result = await Runner.run(guardrail_agent, output.response, context=ctx.context)
return GuardrailFunctionOutput(
output_info=result.final_output,
tripwire_triggered=result.final_output.is_math,
)
agent = Agent( # (4)!
name="Customer support agent",
instructions="You are a customer support agent. You help customers with their questions.",
output_guardrails=[math_guardrail],
output_type=MessageOutput,
)
async def main():
# This should trip the guardrail
try:
await Runner.run(agent, "Hello, can you help me solve for x: 2x + 3 = 11?")
print("Guardrail didn't trip - this is unexpected")
except OutputGuardrailTripwireTriggered:
print("Math output guardrail tripped")
```
1. This is the actual agent's output type.
2. This is the guardrail's output type.
3. This is the guardrail function that receives the agent's output, and returns the result.
4. This is the actual agent that defines the workflow.
Lastly, here are examples of tool guardrails.
```python
import json
from agents import (
Agent,
Runner,
ToolGuardrailFunctionOutput,
function_tool,
tool_input_guardrail,
tool_output_guardrail,
)
@tool_input_guardrail
def block_secrets(data):
args = json.loads(data.context.tool_arguments or "{}")
if "sk-" in json.dumps(args):
return ToolGuardrailFunctionOutput.reject_content(
"Remove secrets before calling this tool."
)
return ToolGuardrailFunctionOutput.allow()
@tool_output_guardrail
def redact_output(data):
text = str(data.output or "")
if "sk-" in text:
return ToolGuardrailFunctionOutput.reject_content("Output contained sensitive data.")
return ToolGuardrailFunctionOutput.allow()
@function_tool(
tool_input_guardrails=[block_secrets],
tool_output_guardrails=[redact_output],
)
def classify_text(text: str) -> str:
"""Classify text for internal routing."""
return f"length:{len(text)}"
agent = Agent(name="Classifier", tools=[classify_text])
result = Runner.run_sync(agent, "hello world")
print(result.final_output)
```
================================================
FILE: docs/handoffs.md
================================================
# Handoffs
Handoffs allow an agent to delegate tasks to another agent. This is particularly useful in scenarios where different agents specialize in distinct areas. For example, a customer support app might have agents that each specifically handle tasks like order status, refunds, FAQs, etc.
Handoffs are represented as tools to the LLM. So if there's a handoff to an agent named `Refund Agent`, the tool would be called `transfer_to_refund_agent`.
## Creating a handoff
All agents have a [`handoffs`][agents.agent.Agent.handoffs] param, which can either take an `Agent` directly, or a `Handoff` object that customizes the Handoff.
If you pass plain `Agent` instances, their [`handoff_description`][agents.agent.Agent.handoff_description] (when set) is appended to the default tool description. Use it to hint when the model should pick that handoff without writing a full `handoff()` object.
You can create a handoff using the [`handoff()`][agents.handoffs.handoff] function provided by the Agents SDK. This function allows you to specify the agent to hand off to, along with optional overrides and input filters.
### Basic usage
Here's how you can create a simple handoff:
```python
from agents import Agent, handoff
billing_agent = Agent(name="Billing agent")
refund_agent = Agent(name="Refund agent")
# (1)!
triage_agent = Agent(name="Triage agent", handoffs=[billing_agent, handoff(refund_agent)])
```
1. You can use the agent directly (as in `billing_agent`), or you can use the `handoff()` function.
### Customizing handoffs via the `handoff()` function
The [`handoff()`][agents.handoffs.handoff] function lets you customize things.
- `agent`: This is the agent to which things will be handed off.
- `tool_name_override`: By default, the `Handoff.default_tool_name()` function is used, which resolves to `transfer_to_<agent_name>`. You can override this.
- `tool_description_override`: Override the default tool description from `Handoff.default_tool_description()`
- `on_handoff`: A callback function executed when the handoff is invoked. This is useful for things like kicking off some data fetching as soon as you know a handoff is being invoked. This function receives the agent context, and can optionally also receive LLM generated input. The input data is controlled by the `input_type` param.
- `input_type`: The schema for the handoff tool-call arguments. When set, the parsed payload is passed to `on_handoff`.
- `input_filter`: This lets you filter the input received by the next agent. See below for more.
- `is_enabled`: Whether the handoff is enabled. This can be a boolean or a function that returns a boolean, allowing you to dynamically enable or disable the handoff at runtime.
- `nest_handoff_history`: Optional per-call override for the RunConfig-level `nest_handoff_history` setting. If `None`, the value defined in the active run configuration is used instead.
The [`handoff()`][agents.handoffs.handoff] helper always transfers control to the specific `agent` you passed in. If you have multiple possible destinations, register one handoff per destination and let the model choose among them. Use a custom [`Handoff`][agents.handoffs.Handoff] only when your own handoff code must decide which agent to return at invocation time.
```python
from agents import Agent, handoff, RunContextWrapper
def on_handoff(ctx: RunContextWrapper[None]):
print("Handoff called")
agent = Agent(name="My agent")
handoff_obj = handoff(
agent=agent,
on_handoff=on_handoff,
tool_name_override="custom_handoff_tool",
tool_description_override="Custom description",
)
```
## Handoff inputs
In certain situations, you want the LLM to provide some data when it calls a handoff. For example, imagine a handoff to an "Escalation agent". You might want a reason to be provided, so you can log it.
```python
from pydantic import BaseModel
from agents import Agent, handoff, RunContextWrapper
class EscalationData(BaseModel):
reason: str
async def on_handoff(ctx: RunContextWrapper[None], input_data: EscalationData):
print(f"Escalation agent called with reason: {input_data.reason}")
agent = Agent(name="Escalation agent")
handoff_obj = handoff(
agent=agent,
on_handoff=on_handoff,
input_type=EscalationData,
)
```
`input_type` describes the arguments for the handoff tool call itself. The SDK exposes that schema to the model as the handoff tool's `parameters`, validates the returned JSON locally, and passes the parsed value to `on_handoff`.
It does not replace the next agent's main input, and it does not choose a different destination. The [`handoff()`][agents.handoffs.handoff] helper still transfers to the specific agent you wrapped, and the receiving agent still sees the conversation history unless you change it with an [`input_filter`][agents.handoffs.Handoff.input_filter] or nested handoff history settings.
`input_type` is also separate from [`RunContextWrapper.context`][agents.run_context.RunContextWrapper.context]. Use `input_type` for metadata the model decides at handoff time, not for application state or dependencies you already have locally.
### When to use `input_type`
Use `input_type` when the handoff needs a small piece of model-generated metadata such as `reason`, `language`, `priority`, or `summary`. For example, a triage agent can hand off to a refund agent with `{ "reason": "duplicate_charge", "priority": "high" }`, and `on_handoff` can log or persist that metadata before the refund agent takes over.
Choose a different mechanism when the goal is different:
- Put existing application state and dependencies in [`RunContextWrapper.context`][agents.run_context.RunContextWrapper.context]. See the [context guide](context.md).
- Use [`input_filter`][agents.handoffs.Handoff.input_filter], [`RunConfig.nest_handoff_history`][agents.run.RunConfig.nest_handoff_history], or [`RunConfig.handoff_history_mapper`][agents.run.RunConfig.handoff_history_mapper] if you want to change what history the receiving agent sees.
- Register one handoff per destination if there are multiple possible specialists. `input_type` can add metadata to the chosen handoff, but it does not dispatch between destinations.
- If you want structured input for a nested specialist without transferring the conversation, prefer [`Agent.as_tool(parameters=...)`][agents.agent.Agent.as_tool]. See [tools](tools.md#structured-input-for-tool-agents).
## Input filters
When a handoff occurs, it's as though the new agent takes over the conversation, and gets to see the entire previous conversation history. If you want to change this, you can set an [`input_filter`][agents.handoffs.Handoff.input_filter]. An input filter is a function that receives the existing input via a [`HandoffInputData`][agents.handoffs.HandoffInputData], and must return a new `HandoffInputData`.
[`HandoffInputData`][agents.handoffs.HandoffInputData] includes:
- `input_history`: the input history before `Runner.run(...)` started.
- `pre_handoff_items`: items generated before the agent turn where the handoff was invoked.
- `new_items`: items generated during the current turn, including the handoff call and handoff output items.
- `input_items`: optional items to forward to the next agent instead of `new_items`, allowing you to filter model input while keeping `new_items` intact for history.
gitextract_tsktwg2s/
├── .agents/
│ └── skills/
│ ├── code-change-verification/
│ │ ├── SKILL.md
│ │ ├── agents/
│ │ │ └── openai.yaml
│ │ └── scripts/
│ │ ├── run.ps1
│ │ └── run.sh
│ ├── docs-sync/
│ │ ├── SKILL.md
│ │ ├── agents/
│ │ │ └── openai.yaml
│ │ └── references/
│ │ └── doc-coverage-checklist.md
│ ├── examples-auto-run/
│ │ ├── SKILL.md
│ │ ├── agents/
│ │ │ └── openai.yaml
│ │ └── scripts/
│ │ └── run.sh
│ ├── final-release-review/
│ │ ├── SKILL.md
│ │ ├── agents/
│ │ │ └── openai.yaml
│ │ ├── references/
│ │ │ └── review-checklist.md
│ │ └── scripts/
│ │ └── find_latest_release_tag.sh
│ ├── implementation-strategy/
│ │ ├── SKILL.md
│ │ └── agents/
│ │ └── openai.yaml
│ ├── openai-knowledge/
│ │ ├── SKILL.md
│ │ └── agents/
│ │ └── openai.yaml
│ ├── pr-draft-summary/
│ │ ├── SKILL.md
│ │ └── agents/
│ │ └── openai.yaml
│ └── test-coverage-improver/
│ ├── SKILL.md
│ └── agents/
│ └── openai.yaml
├── .github/
│ ├── ISSUE_TEMPLATE/
│ │ ├── bug_report.md
│ │ ├── feature_request.md
│ │ ├── model_provider.md
│ │ └── question.md
│ ├── PULL_REQUEST_TEMPLATE/
│ │ └── pull_request_template.md
│ ├── codex/
│ │ ├── prompts/
│ │ │ ├── pr-labels.md
│ │ │ └── release-review.md
│ │ └── schemas/
│ │ └── pr-labels.json
│ ├── dependabot.yml
│ ├── scripts/
│ │ ├── detect-changes.sh
│ │ ├── pr_labels.py
│ │ ├── run-asyncio-teardown-stability.sh
│ │ └── select-release-milestone.py
│ └── workflows/
│ ├── docs.yml
│ ├── issues.yml
│ ├── pr-labels.yml
│ ├── publish.yml
│ ├── release-pr-update.yml
│ ├── release-pr.yml
│ ├── release-tag.yml
│ ├── tests.yml
│ └── update-docs.yml
├── .gitignore
├── .prettierrc
├── .vscode/
│ ├── launch.json
│ └── settings.json
├── AGENTS.md
├── CLAUDE.md
├── LICENSE
├── Makefile
├── PLANS.md
├── README.md
├── docs/
│ ├── agents.md
│ ├── config.md
│ ├── context.md
│ ├── examples.md
│ ├── guardrails.md
│ ├── handoffs.md
│ ├── human_in_the_loop.md
│ ├── index.md
│ ├── ja/
│ │ ├── agents.md
│ │ ├── config.md
│ │ ├── context.md
│ │ ├── examples.md
│ │ ├── guardrails.md
│ │ ├── handoffs.md
│ │ ├── human_in_the_loop.md
│ │ ├── index.md
│ │ ├── mcp.md
│ │ ├── models/
│ │ │ ├── index.md
│ │ │ └── litellm.md
│ │ ├── multi_agent.md
│ │ ├── quickstart.md
│ │ ├── realtime/
│ │ │ ├── guide.md
│ │ │ ├── quickstart.md
│ │ │ └── transport.md
│ │ ├── release.md
│ │ ├── repl.md
│ │ ├── results.md
│ │ ├── running_agents.md
│ │ ├── sessions/
│ │ │ ├── advanced_sqlite_session.md
│ │ │ ├── encrypted_session.md
│ │ │ ├── index.md
│ │ │ └── sqlalchemy_session.md
│ │ ├── sessions.md
│ │ ├── streaming.md
│ │ ├── tools.md
│ │ ├── tracing.md
│ │ ├── usage.md
│ │ ├── visualization.md
│ │ └── voice/
│ │ ├── pipeline.md
│ │ ├── quickstart.md
│ │ └── tracing.md
│ ├── ko/
│ │ ├── agents.md
│ │ ├── config.md
│ │ ├── context.md
│ │ ├── examples.md
│ │ ├── guardrails.md
│ │ ├── handoffs.md
│ │ ├── human_in_the_loop.md
│ │ ├── index.md
│ │ ├── mcp.md
│ │ ├── models/
│ │ │ ├── index.md
│ │ │ └── litellm.md
│ │ ├── multi_agent.md
│ │ ├── quickstart.md
│ │ ├── realtime/
│ │ │ ├── guide.md
│ │ │ ├── quickstart.md
│ │ │ └── transport.md
│ │ ├── release.md
│ │ ├── repl.md
│ │ ├── results.md
│ │ ├── running_agents.md
│ │ ├── sessions/
│ │ │ ├── advanced_sqlite_session.md
│ │ │ ├── encrypted_session.md
│ │ │ ├── index.md
│ │ │ └── sqlalchemy_session.md
│ │ ├── sessions.md
│ │ ├── streaming.md
│ │ ├── tools.md
│ │ ├── tracing.md
│ │ ├── usage.md
│ │ ├── visualization.md
│ │ └── voice/
│ │ ├── pipeline.md
│ │ ├── quickstart.md
│ │ └── tracing.md
│ ├── llms-full.txt
│ ├── llms.txt
│ ├── mcp.md
│ ├── models/
│ │ ├── index.md
│ │ └── litellm.md
│ ├── multi_agent.md
│ ├── quickstart.md
│ ├── realtime/
│ │ ├── guide.md
│ │ ├── quickstart.md
│ │ └── transport.md
│ ├── ref/
│ │ ├── agent.md
│ │ ├── agent_output.md
│ │ ├── agent_tool_input.md
│ │ ├── agent_tool_state.md
│ │ ├── apply_diff.md
│ │ ├── computer.md
│ │ ├── editor.md
│ │ ├── exceptions.md
│ │ ├── extensions/
│ │ │ ├── experimental/
│ │ │ │ └── codex/
│ │ │ │ ├── codex.md
│ │ │ │ ├── codex_options.md
│ │ │ │ ├── codex_tool.md
│ │ │ │ ├── events.md
│ │ │ │ ├── exec.md
│ │ │ │ ├── items.md
│ │ │ │ ├── output_schema_file.md
│ │ │ │ ├── payloads.md
│ │ │ │ ├── thread.md
│ │ │ │ ├── thread_options.md
│ │ │ │ └── turn_options.md
│ │ │ ├── handoff_filters.md
│ │ │ ├── handoff_prompt.md
│ │ │ ├── litellm.md
│ │ │ ├── memory/
│ │ │ │ ├── advanced_sqlite_session.md
│ │ │ │ ├── async_sqlite_session.md
│ │ │ │ ├── dapr_session.md
│ │ │ │ ├── encrypt_session.md
│ │ │ │ ├── redis_session.md
│ │ │ │ └── sqlalchemy_session.md
│ │ │ ├── models/
│ │ │ │ ├── litellm_model.md
│ │ │ │ └── litellm_provider.md
│ │ │ ├── tool_output_trimmer.md
│ │ │ └── visualization.md
│ │ ├── function_schema.md
│ │ ├── guardrail.md
│ │ ├── handoffs/
│ │ │ └── history.md
│ │ ├── handoffs.md
│ │ ├── index.md
│ │ ├── items.md
│ │ ├── lifecycle.md
│ │ ├── logger.md
│ │ ├── mcp/
│ │ │ ├── manager.md
│ │ │ ├── server.md
│ │ │ └── util.md
│ │ ├── memory/
│ │ │ ├── openai_conversations_session.md
│ │ │ ├── openai_responses_compaction_session.md
│ │ │ ├── session.md
│ │ │ ├── session_settings.md
│ │ │ ├── sqlite_session.md
│ │ │ └── util.md
│ │ ├── memory.md
│ │ ├── model_settings.md
│ │ ├── models/
│ │ │ ├── chatcmpl_converter.md
│ │ │ ├── chatcmpl_helpers.md
│ │ │ ├── chatcmpl_stream_handler.md
│ │ │ ├── default_models.md
│ │ │ ├── fake_id.md
│ │ │ ├── interface.md
│ │ │ ├── multi_provider.md
│ │ │ ├── openai_chatcompletions.md
│ │ │ ├── openai_provider.md
│ │ │ └── openai_responses.md
│ │ ├── prompts.md
│ │ ├── realtime/
│ │ │ ├── agent.md
│ │ │ ├── audio_formats.md
│ │ │ ├── config.md
│ │ │ ├── events.md
│ │ │ ├── handoffs.md
│ │ │ ├── items.md
│ │ │ ├── model.md
│ │ │ ├── model_events.md
│ │ │ ├── model_inputs.md
│ │ │ ├── openai_realtime.md
│ │ │ ├── runner.md
│ │ │ └── session.md
│ │ ├── repl.md
│ │ ├── responses_websocket_session.md
│ │ ├── result.md
│ │ ├── retry.md
│ │ ├── run.md
│ │ ├── run_config.md
│ │ ├── run_context.md
│ │ ├── run_error_handlers.md
│ │ ├── run_internal/
│ │ │ ├── agent_runner_helpers.md
│ │ │ ├── approvals.md
│ │ │ ├── error_handlers.md
│ │ │ ├── guardrails.md
│ │ │ ├── items.md
│ │ │ ├── model_retry.md
│ │ │ ├── oai_conversation.md
│ │ │ ├── run_loop.md
│ │ │ ├── run_steps.md
│ │ │ ├── session_persistence.md
│ │ │ ├── streaming.md
│ │ │ ├── tool_actions.md
│ │ │ ├── tool_execution.md
│ │ │ ├── tool_planning.md
│ │ │ ├── tool_use_tracker.md
│ │ │ ├── turn_preparation.md
│ │ │ └── turn_resolution.md
│ │ ├── run_state.md
│ │ ├── stream_events.md
│ │ ├── strict_schema.md
│ │ ├── tool.md
│ │ ├── tool_context.md
│ │ ├── tool_guardrails.md
│ │ ├── tracing/
│ │ │ ├── config.md
│ │ │ ├── context.md
│ │ │ ├── create.md
│ │ │ ├── index.md
│ │ │ ├── logger.md
│ │ │ ├── model_tracing.md
│ │ │ ├── processor_interface.md
│ │ │ ├── processors.md
│ │ │ ├── provider.md
│ │ │ ├── scope.md
│ │ │ ├── setup.md
│ │ │ ├── span_data.md
│ │ │ ├── spans.md
│ │ │ ├── traces.md
│ │ │ └── util.md
│ │ ├── usage.md
│ │ ├── version.md
│ │ └── voice/
│ │ ├── events.md
│ │ ├── exceptions.md
│ │ ├── imports.md
│ │ ├── input.md
│ │ ├── model.md
│ │ ├── models/
│ │ │ ├── openai_model_provider.md
│ │ │ ├── openai_provider.md
│ │ │ ├── openai_stt.md
│ │ │ └── openai_tts.md
│ │ ├── pipeline.md
│ │ ├── pipeline_config.md
│ │ ├── result.md
│ │ ├── utils.md
│ │ └── workflow.md
│ ├── release.md
│ ├── repl.md
│ ├── results.md
│ ├── running_agents.md
│ ├── scripts/
│ │ ├── generate_ref_files.py
│ │ └── translate_docs.py
│ ├── sessions/
│ │ ├── advanced_sqlite_session.md
│ │ ├── encrypted_session.md
│ │ ├── index.md
│ │ └── sqlalchemy_session.md
│ ├── streaming.md
│ ├── stylesheets/
│ │ └── extra.css
│ ├── tools.md
│ ├── tracing.md
│ ├── usage.md
│ ├── visualization.md
│ ├── voice/
│ │ ├── pipeline.md
│ │ ├── quickstart.md
│ │ └── tracing.md
│ └── zh/
│ ├── agents.md
│ ├── config.md
│ ├── context.md
│ ├── examples.md
│ ├── guardrails.md
│ ├── handoffs.md
│ ├── human_in_the_loop.md
│ ├── index.md
│ ├── mcp.md
│ ├── models/
│ │ ├── index.md
│ │ └── litellm.md
│ ├── multi_agent.md
│ ├── quickstart.md
│ ├── realtime/
│ │ ├── guide.md
│ │ ├── quickstart.md
│ │ └── transport.md
│ ├── release.md
│ ├── repl.md
│ ├── results.md
│ ├── running_agents.md
│ ├── sessions/
│ │ ├── advanced_sqlite_session.md
│ │ ├── encrypted_session.md
│ │ ├── index.md
│ │ └── sqlalchemy_session.md
│ ├── sessions.md
│ ├── streaming.md
│ ├── tools.md
│ ├── tracing.md
│ ├── usage.md
│ ├── visualization.md
│ └── voice/
│ ├── pipeline.md
│ ├── quickstart.md
│ └── tracing.md
├── examples/
│ ├── __init__.py
│ ├── agent_patterns/
│ │ ├── README.md
│ │ ├── agents_as_tools.py
│ │ ├── agents_as_tools_conditional.py
│ │ ├── agents_as_tools_streaming.py
│ │ ├── agents_as_tools_structured.py
│ │ ├── deterministic.py
│ │ ├── forcing_tool_use.py
│ │ ├── human_in_the_loop.py
│ │ ├── human_in_the_loop_custom_rejection.py
│ │ ├── human_in_the_loop_stream.py
│ │ ├── input_guardrails.py
│ │ ├── llm_as_a_judge.py
│ │ ├── output_guardrails.py
│ │ ├── parallelization.py
│ │ ├── routing.py
│ │ └── streaming_guardrails.py
│ ├── auto_mode.py
│ ├── basic/
│ │ ├── agent_lifecycle_example.py
│ │ ├── dynamic_system_prompt.py
│ │ ├── hello_world.py
│ │ ├── hello_world_gpt_5.py
│ │ ├── hello_world_gpt_oss.py
│ │ ├── hello_world_jupyter.ipynb
│ │ ├── image_tool_output.py
│ │ ├── lifecycle_example.py
│ │ ├── local_file.py
│ │ ├── local_image.py
│ │ ├── non_strict_output_type.py
│ │ ├── previous_response_id.py
│ │ ├── prompt_template.py
│ │ ├── remote_image.py
│ │ ├── remote_pdf.py
│ │ ├── retry.py
│ │ ├── retry_litellm.py
│ │ ├── stream_function_call_args.py
│ │ ├── stream_items.py
│ │ ├── stream_text.py
│ │ ├── stream_ws.py
│ │ ├── tool_guardrails.py
│ │ ├── tools.py
│ │ └── usage_tracking.py
│ ├── customer_service/
│ │ └── main.py
│ ├── financial_research_agent/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── agents/
│ │ │ ├── __init__.py
│ │ │ ├── financials_agent.py
│ │ │ ├── planner_agent.py
│ │ │ ├── risk_agent.py
│ │ │ ├── search_agent.py
│ │ │ ├── verifier_agent.py
│ │ │ └── writer_agent.py
│ │ ├── main.py
│ │ ├── manager.py
│ │ └── printer.py
│ ├── handoffs/
│ │ ├── message_filter.py
│ │ └── message_filter_streaming.py
│ ├── hosted_mcp/
│ │ ├── __init__.py
│ │ ├── connectors.py
│ │ ├── human_in_the_loop.py
│ │ ├── on_approval.py
│ │ └── simple.py
│ ├── mcp/
│ │ ├── filesystem_example/
│ │ │ ├── README.md
│ │ │ ├── main.py
│ │ │ └── sample_files/
│ │ │ ├── favorite_books.txt
│ │ │ ├── favorite_cities.txt
│ │ │ └── favorite_songs.txt
│ │ ├── get_all_mcp_tools_example/
│ │ │ ├── README.md
│ │ │ ├── main.py
│ │ │ └── sample_files/
│ │ │ ├── books.txt
│ │ │ └── favorite_songs.txt
│ │ ├── git_example/
│ │ │ ├── README.md
│ │ │ └── main.py
│ │ ├── manager_example/
│ │ │ ├── README.md
│ │ │ ├── app.py
│ │ │ └── mcp_server.py
│ │ ├── prompt_server/
│ │ │ ├── README.md
│ │ │ ├── main.py
│ │ │ └── server.py
│ │ ├── sse_example/
│ │ │ ├── README.md
│ │ │ ├── main.py
│ │ │ └── server.py
│ │ ├── sse_remote_example/
│ │ │ ├── README.md
│ │ │ └── main.py
│ │ ├── streamable_http_remote_example/
│ │ │ ├── README.md
│ │ │ └── main.py
│ │ ├── streamablehttp_custom_client_example/
│ │ │ ├── README.md
│ │ │ ├── main.py
│ │ │ └── server.py
│ │ ├── streamablehttp_example/
│ │ │ ├── README.md
│ │ │ ├── main.py
│ │ │ └── server.py
│ │ └── tool_filter_example/
│ │ ├── README.md
│ │ ├── main.py
│ │ └── sample_files/
│ │ ├── books.txt
│ │ └── favorite_songs.txt
│ ├── memory/
│ │ ├── advanced_sqlite_session_example.py
│ │ ├── compaction_session_example.py
│ │ ├── compaction_session_stateless_example.py
│ │ ├── dapr_session_example.py
│ │ ├── encrypted_session_example.py
│ │ ├── file_hitl_example.py
│ │ ├── file_session.py
│ │ ├── hitl_session_scenario.py
│ │ ├── memory_session_hitl_example.py
│ │ ├── openai_session_example.py
│ │ ├── openai_session_hitl_example.py
│ │ ├── redis_session_example.py
│ │ ├── sqlalchemy_session_example.py
│ │ └── sqlite_session_example.py
│ ├── model_providers/
│ │ ├── README.md
│ │ ├── custom_example_agent.py
│ │ ├── custom_example_global.py
│ │ ├── custom_example_provider.py
│ │ ├── litellm_auto.py
│ │ └── litellm_provider.py
│ ├── realtime/
│ │ ├── app/
│ │ │ ├── README.md
│ │ │ ├── agent.py
│ │ │ ├── server.py
│ │ │ └── static/
│ │ │ ├── app.js
│ │ │ ├── audio-playback.worklet.js
│ │ │ ├── audio-recorder.worklet.js
│ │ │ └── index.html
│ │ ├── cli/
│ │ │ └── demo.py
│ │ ├── twilio/
│ │ │ ├── README.md
│ │ │ ├── __init__.py
│ │ │ ├── requirements.txt
│ │ │ ├── server.py
│ │ │ └── twilio_handler.py
│ │ └── twilio_sip/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── agents.py
│ │ ├── requirements.txt
│ │ └── server.py
│ ├── reasoning_content/
│ │ ├── __init__.py
│ │ ├── gpt_oss_stream.py
│ │ ├── main.py
│ │ └── runner_example.py
│ ├── research_bot/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── agents/
│ │ │ ├── __init__.py
│ │ │ ├── planner_agent.py
│ │ │ ├── search_agent.py
│ │ │ └── writer_agent.py
│ │ ├── main.py
│ │ ├── manager.py
│ │ ├── printer.py
│ │ └── sample_outputs/
│ │ ├── product_recs.md
│ │ ├── product_recs.txt
│ │ ├── vacation.md
│ │ └── vacation.txt
│ ├── run_examples.py
│ ├── tools/
│ │ ├── apply_patch.py
│ │ ├── code_interpreter.py
│ │ ├── codex.py
│ │ ├── codex_same_thread.py
│ │ ├── computer_use.py
│ │ ├── container_shell_inline_skill.py
│ │ ├── container_shell_skill_reference.py
│ │ ├── file_search.py
│ │ ├── image_generator.py
│ │ ├── local_shell_skill.py
│ │ ├── shell.py
│ │ ├── shell_human_in_the_loop.py
│ │ ├── skills/
│ │ │ └── csv-workbench/
│ │ │ ├── SKILL.md
│ │ │ └── playbook.md
│ │ ├── tool_search.py
│ │ ├── web_search.py
│ │ └── web_search_filters.py
│ └── voice/
│ ├── __init__.py
│ ├── static/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── main.py
│ │ └── util.py
│ └── streamed/
│ ├── README.md
│ ├── __init__.py
│ ├── main.py
│ └── my_workflow.py
├── mkdocs.yml
├── pyproject.toml
├── pyrightconfig.json
├── src/
│ └── agents/
│ ├── __init__.py
│ ├── _config.py
│ ├── _debug.py
│ ├── _mcp_tool_metadata.py
│ ├── _tool_identity.py
│ ├── agent.py
│ ├── agent_output.py
│ ├── agent_tool_input.py
│ ├── agent_tool_state.py
│ ├── apply_diff.py
│ ├── computer.py
│ ├── editor.py
│ ├── exceptions.py
│ ├── extensions/
│ │ ├── __init__.py
│ │ ├── experimental/
│ │ │ ├── __init__.py
│ │ │ └── codex/
│ │ │ ├── __init__.py
│ │ │ ├── codex.py
│ │ │ ├── codex_options.py
│ │ │ ├── codex_tool.py
│ │ │ ├── events.py
│ │ │ ├── exec.py
│ │ │ ├── items.py
│ │ │ ├── output_schema_file.py
│ │ │ ├── payloads.py
│ │ │ ├── thread.py
│ │ │ ├── thread_options.py
│ │ │ └── turn_options.py
│ │ ├── handoff_filters.py
│ │ ├── handoff_prompt.py
│ │ ├── memory/
│ │ │ ├── __init__.py
│ │ │ ├── advanced_sqlite_session.py
│ │ │ ├── async_sqlite_session.py
│ │ │ ├── dapr_session.py
│ │ │ ├── encrypt_session.py
│ │ │ ├── redis_session.py
│ │ │ └── sqlalchemy_session.py
│ │ ├── models/
│ │ │ ├── __init__.py
│ │ │ ├── litellm_model.py
│ │ │ └── litellm_provider.py
│ │ ├── tool_output_trimmer.py
│ │ └── visualization.py
│ ├── function_schema.py
│ ├── guardrail.py
│ ├── handoffs/
│ │ ├── __init__.py
│ │ └── history.py
│ ├── items.py
│ ├── lifecycle.py
│ ├── logger.py
│ ├── mcp/
│ │ ├── __init__.py
│ │ ├── manager.py
│ │ ├── server.py
│ │ └── util.py
│ ├── memory/
│ │ ├── __init__.py
│ │ ├── openai_conversations_session.py
│ │ ├── openai_responses_compaction_session.py
│ │ ├── session.py
│ │ ├── session_settings.py
│ │ ├── sqlite_session.py
│ │ └── util.py
│ ├── model_settings.py
│ ├── models/
│ │ ├── __init__.py
│ │ ├── _openai_retry.py
│ │ ├── _openai_shared.py
│ │ ├── _retry_runtime.py
│ │ ├── chatcmpl_converter.py
│ │ ├── chatcmpl_helpers.py
│ │ ├── chatcmpl_stream_handler.py
│ │ ├── default_models.py
│ │ ├── fake_id.py
│ │ ├── interface.py
│ │ ├── multi_provider.py
│ │ ├── openai_chatcompletions.py
│ │ ├── openai_provider.py
│ │ ├── openai_responses.py
│ │ └── reasoning_content_replay.py
│ ├── prompts.py
│ ├── py.typed
│ ├── realtime/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── _default_tracker.py
│ │ ├── _util.py
│ │ ├── agent.py
│ │ ├── audio_formats.py
│ │ ├── config.py
│ │ ├── events.py
│ │ ├── handoffs.py
│ │ ├── items.py
│ │ ├── model.py
│ │ ├── model_events.py
│ │ ├── model_inputs.py
│ │ ├── openai_realtime.py
│ │ ├── runner.py
│ │ └── session.py
│ ├── repl.py
│ ├── responses_websocket_session.py
│ ├── result.py
│ ├── retry.py
│ ├── run.py
│ ├── run_config.py
│ ├── run_context.py
│ ├── run_error_handlers.py
│ ├── run_internal/
│ │ ├── __init__.py
│ │ ├── _asyncio_progress.py
│ │ ├── agent_runner_helpers.py
│ │ ├── approvals.py
│ │ ├── error_handlers.py
│ │ ├── guardrails.py
│ │ ├── items.py
│ │ ├── model_retry.py
│ │ ├── oai_conversation.py
│ │ ├── run_loop.py
│ │ ├── run_steps.py
│ │ ├── session_persistence.py
│ │ ├── streaming.py
│ │ ├── tool_actions.py
│ │ ├── tool_execution.py
│ │ ├── tool_planning.py
│ │ ├── tool_use_tracker.py
│ │ ├── turn_preparation.py
│ │ └── turn_resolution.py
│ ├── run_state.py
│ ├── stream_events.py
│ ├── strict_schema.py
│ ├── tool.py
│ ├── tool_context.py
│ ├── tool_guardrails.py
│ ├── tracing/
│ │ ├── __init__.py
│ │ ├── config.py
│ │ ├── context.py
│ │ ├── create.py
│ │ ├── logger.py
│ │ ├── model_tracing.py
│ │ ├── processor_interface.py
│ │ ├── processors.py
│ │ ├── provider.py
│ │ ├── scope.py
│ │ ├── setup.py
│ │ ├── span_data.py
│ │ ├── spans.py
│ │ ├── traces.py
│ │ └── util.py
│ ├── usage.py
│ ├── util/
│ │ ├── __init__.py
│ │ ├── _approvals.py
│ │ ├── _coro.py
│ │ ├── _error_tracing.py
│ │ ├── _json.py
│ │ ├── _pretty_print.py
│ │ ├── _transforms.py
│ │ └── _types.py
│ ├── version.py
│ └── voice/
│ ├── __init__.py
│ ├── events.py
│ ├── exceptions.py
│ ├── imports.py
│ ├── input.py
│ ├── model.py
│ ├── models/
│ │ ├── __init__.py
│ │ ├── openai_model_provider.py
│ │ ├── openai_stt.py
│ │ └── openai_tts.py
│ ├── pipeline.py
│ ├── pipeline_config.py
│ ├── result.py
│ ├── utils.py
│ └── workflow.py
└── tests/
├── README.md
├── __init__.py
├── conftest.py
├── extensions/
│ ├── experiemental/
│ │ └── codex/
│ │ ├── test_codex_exec_thread.py
│ │ └── test_codex_tool.py
│ ├── memory/
│ │ ├── test_advanced_sqlite_session.py
│ │ ├── test_async_sqlite_session.py
│ │ ├── test_dapr_redis_integration.py
│ │ ├── test_dapr_session.py
│ │ ├── test_encrypt_session.py
│ │ ├── test_redis_session.py
│ │ └── test_sqlalchemy_session.py
│ └── test_tool_output_trimmer.py
├── fake_model.py
├── fastapi/
│ ├── __init__.py
│ ├── streaming_app.py
│ └── test_streaming_context.py
├── mcp/
│ ├── __init__.py
│ ├── helpers.py
│ ├── test_caching.py
│ ├── test_client_session_retries.py
│ ├── test_connect_disconnect.py
│ ├── test_mcp_approval.py
│ ├── test_mcp_auth_params.py
│ ├── test_mcp_server_manager.py
│ ├── test_mcp_tracing.py
│ ├── test_mcp_util.py
│ ├── test_message_handler.py
│ ├── test_prompt_server.py
│ ├── test_runner_calls_mcp.py
│ ├── test_server_errors.py
│ ├── test_streamable_http_client_factory.py
│ ├── test_streamable_http_session_id.py
│ └── test_tool_filtering.py
├── memory/
│ └── test_openai_responses_compaction_session.py
├── model_settings/
│ └── test_serialization.py
├── models/
│ ├── __init__.py
│ ├── test_deepseek_reasoning_content.py
│ ├── test_default_models.py
│ ├── test_kwargs_functionality.py
│ ├── test_litellm_chatcompletions_stream.py
│ ├── test_litellm_extra_body.py
│ ├── test_litellm_logging_patch.py
│ ├── test_litellm_user_agent.py
│ ├── test_map.py
│ └── test_reasoning_content_replay_hook.py
├── realtime/
│ ├── __init__.py
│ ├── test_agent.py
│ ├── test_audio_formats_unit.py
│ ├── test_conversion_helpers.py
│ ├── test_ga_session_update_normalization.py
│ ├── test_item_parsing.py
│ ├── test_model_events.py
│ ├── test_openai_realtime.py
│ ├── test_openai_realtime_conversions.py
│ ├── test_openai_realtime_sip_model.py
│ ├── test_playback_tracker.py
│ ├── test_playback_tracker_manual_unit.py
│ ├── test_realtime_handoffs.py
│ ├── test_realtime_model_settings.py
│ ├── test_runner.py
│ ├── test_session.py
│ ├── test_session_payload_and_formats.py
│ ├── test_tracing.py
│ └── test_twilio_sip_server.py
├── test_agent_as_tool.py
├── test_agent_clone_shallow_copy.py
├── test_agent_config.py
├── test_agent_hooks.py
├── test_agent_instructions_signature.py
├── test_agent_llm_hooks.py
├── test_agent_memory_leak.py
├── test_agent_prompt.py
├── test_agent_runner.py
├── test_agent_runner_streamed.py
├── test_agent_runner_sync.py
├── test_agent_tool_input.py
├── test_agent_tool_state.py
├── test_agent_tracing.py
├── test_agents_logging.py
├── test_anthropic_thinking_blocks.py
├── test_apply_diff.py
├── test_apply_diff_helpers.py
├── test_apply_patch_tool.py
├── test_asyncio_progress.py
├── test_call_model_input_filter.py
├── test_call_model_input_filter_unit.py
├── test_cancel_streaming.py
├── test_computer_action.py
├── test_computer_tool_lifecycle.py
├── test_config.py
├── test_debug.py
├── test_doc_parsing.py
├── test_example_workflows.py
├── test_extended_thinking_message_order.py
├── test_extension_filters.py
├── test_extra_headers.py
├── test_function_schema.py
├── test_function_tool.py
├── test_function_tool_decorator.py
├── test_gemini_thought_signatures.py
├── test_gemini_thought_signatures_stream.py
├── test_global_hooks.py
├── test_guardrails.py
├── test_handoff_history_duplication.py
├── test_handoff_prompt.py
├── test_handoff_tool.py
├── test_hitl_error_scenarios.py
├── test_hitl_session_scenario.py
├── test_hitl_utils.py
├── test_items_helpers.py
├── test_local_shell_tool.py
├── test_logprobs.py
├── test_max_turns.py
├── test_model_payload_iterators.py
├── test_model_retry.py
├── test_openai_chatcompletions.py
├── test_openai_chatcompletions_converter.py
├── test_openai_chatcompletions_stream.py
├── test_openai_conversations_session.py
├── test_openai_responses.py
├── test_openai_responses_converter.py
├── test_output_tool.py
├── test_pr_labels.py
├── test_pretty_print.py
├── test_process_model_response.py
├── test_reasoning_content.py
├── test_remove_openai_responses_api_incompatible_fields.py
├── test_repl.py
├── test_responses.py
├── test_responses_tracing.py
├── test_responses_websocket_session.py
├── test_result_cast.py
├── test_run.py
├── test_run_config.py
├── test_run_context_approvals.py
├── test_run_context_wrapper.py
├── test_run_error_details.py
├── test_run_hooks.py
├── test_run_impl_resume_paths.py
├── test_run_internal_error_handlers.py
├── test_run_internal_items.py
├── test_run_state.py
├── test_run_step_execution.py
├── test_run_step_processing.py
├── test_runner_guardrail_resume.py
├── test_server_conversation_tracker.py
├── test_session.py
├── test_session_exceptions.py
├── test_session_limit.py
├── test_shell_call_serialization.py
├── test_shell_tool.py
├── test_soft_cancel.py
├── test_source_compat_constructors.py
├── test_stream_events.py
├── test_stream_input_guardrail_timing.py
├── test_streaming_logging.py
├── test_streaming_tool_call_arguments.py
├── test_strict_schema.py
├── test_strict_schema_oneof.py
├── test_tool_choice_reset.py
├── test_tool_context.py
├── test_tool_converter.py
├── test_tool_guardrails.py
├── test_tool_metadata.py
├── test_tool_output_conversion.py
├── test_tool_use_behavior.py
├── test_tool_use_tracker.py
├── test_trace_processor.py
├── test_tracing.py
├── test_tracing_errors.py
├── test_tracing_errors_streamed.py
├── test_tracing_provider_safe_debug.py
├── test_usage.py
├── test_visualization.py
├── testing_processor.py
├── tracing/
│ ├── test_import_side_effects.py
│ ├── test_logger.py
│ ├── test_processor_api_key.py
│ ├── test_set_api_key_fix.py
│ ├── test_setup.py
│ ├── test_trace_context.py
│ ├── test_traces_impl.py
│ └── test_tracing_env_disable.py
├── utils/
│ ├── factories.py
│ ├── hitl.py
│ ├── simple_session.py
│ ├── test_json.py
│ └── test_simple_session.py
└── voice/
├── __init__.py
├── fake_models.py
├── helpers.py
├── test_input.py
├── test_openai_stt.py
├── test_openai_tts.py
├── test_pipeline.py
└── test_workflow.py
Showing preview only (675K chars total). Download the full file or copy to clipboard to get everything.
SYMBOL INDEX (6257 symbols across 466 files)
FILE: .github/scripts/pr_labels.py
class PRContext (line 65) | class PRContext:
function read_file_at (line 70) | def read_file_at(commit: str | None, path: str) -> str | None:
function dependency_lines_for_pyproject (line 79) | def dependency_lines_for_pyproject(text: str) -> set[int]:
function pyproject_dependency_changed (line 117) | def pyproject_dependency_changed(
function infer_specific_feature_labels (line 180) | def infer_specific_feature_labels(changed_files: Sequence[str]) -> set[s...
function infer_feature_labels (line 205) | def infer_feature_labels(changed_files: Sequence[str]) -> set[str]:
function infer_fallback_labels (line 218) | def infer_fallback_labels(changed_files: Sequence[str]) -> set[str]:
function load_json (line 222) | def load_json(path: pathlib.Path) -> Any:
function load_pr_context (line 226) | def load_pr_context(path: pathlib.Path) -> PRContext:
function load_codex_labels (line 248) | def load_codex_labels(path: pathlib.Path) -> tuple[list[str], bool]:
function fetch_existing_labels (line 274) | def fetch_existing_labels(pr_number: str) -> set[str]:
function infer_title_intent_labels (line 282) | def infer_title_intent_labels(pr_context: PRContext) -> set[str]:
function compute_desired_labels (line 295) | def compute_desired_labels(
function compute_managed_labels (line 340) | def compute_managed_labels(
function parse_args (line 355) | def parse_args(argv: Sequence[str] | None = None) -> argparse.Namespace:
function main (line 380) | def main(argv: Sequence[str] | None = None) -> int:
FILE: .github/scripts/select-release-milestone.py
function warn (line 13) | def warn(message: str) -> None:
function parse_version (line 17) | def parse_version(value: str | None) -> tuple[int, int, int] | None:
function latest_tag_version (line 29) | def latest_tag_version(exclude_version: tuple[int, int, int] | None) -> ...
function classify_bump (line 48) | def classify_bump(
function parse_milestone_title (line 64) | def parse_milestone_title(title: str | None) -> tuple[int, int] | None:
function fetch_open_milestones (line 73) | def fetch_open_milestones(owner: str, repo: str, token: str) -> list[dict]:
function select_milestone (line 90) | def select_milestone(milestones: list[dict], required_bump: str) -> str ...
function main (line 138) | def main() -> int:
FILE: docs/scripts/generate_ref_files.py
function to_identifier (line 22) | def to_identifier(py_path: Path) -> str:
function md_target (line 28) | def md_target(py_path: Path) -> Path:
function pretty_title (line 34) | def pretty_title(last_segment: str) -> str:
function main (line 46) | def main() -> None:
FILE: docs/scripts/translate_docs.py
function built_instructions (line 185) | def built_instructions(target_language: str, lang_code: str) -> str:
function translate_file (line 296) | def translate_file(file_path: str, target_path: str, lang_code: str) -> ...
function git_last_commit_timestamp (line 374) | def git_last_commit_timestamp(path: str) -> int:
function should_translate_based_on_translation (line 393) | def should_translate_based_on_translation(file_path: str) -> bool:
function translate_single_source_file (line 405) | def translate_single_source_file(
function normalize_source_file_arg (line 426) | def normalize_source_file_arg(file_arg: str) -> str:
function translate_source_files (line 434) | def translate_source_files(
function main (line 459) | def main():
FILE: examples/agent_patterns/agents_as_tools.py
function main (line 59) | async def main():
FILE: examples/agent_patterns/agents_as_tools_conditional.py
class AppContext (line 16) | class AppContext(BaseModel):
function french_spanish_enabled (line 20) | def french_spanish_enabled(ctx: RunContextWrapper[AppContext], agent: Ag...
function european_enabled (line 25) | def european_enabled(ctx: RunContextWrapper[AppContext], agent: AgentBas...
function get_user_name (line 31) | async def get_user_name() -> str:
function main (line 83) | async def main():
FILE: examples/agent_patterns/agents_as_tools_streaming.py
function billing_status_checker (line 10) | def billing_status_checker(customer_id: str | None = None, question: str...
function handle_stream (line 18) | def handle_stream(event: AgentToolStreamEvent) -> None:
function main (line 26) | async def main() -> None:
FILE: examples/agent_patterns/agents_as_tools_structured.py
class TranslationInput (line 12) | class TranslationInput(BaseModel):
function main (line 53) | async def main() -> None:
FILE: examples/agent_patterns/deterministic.py
class OutlineCheckerOutput (line 24) | class OutlineCheckerOutput(BaseModel):
function main (line 42) | async def main():
FILE: examples/agent_patterns/forcing_tool_use.py
class Weather (line 40) | class Weather(BaseModel):
function get_weather (line 47) | def get_weather(city: str) -> Weather:
function custom_tool_use_behavior (line 52) | async def custom_tool_use_behavior(
function main (line 61) | async def main(tool_use_behavior: Literal["default", "first_tool", "cust...
function auto_demo (line 85) | async def auto_demo() -> None:
FILE: examples/agent_patterns/human_in_the_loop.py
function get_weather (line 19) | async def get_weather(city: str) -> str:
function _needs_temperature_approval (line 31) | async def _needs_temperature_approval(_ctx, params, _call_id) -> bool:
function get_temperature (line 40) | async def get_temperature(city: str) -> str:
function confirm (line 65) | async def confirm(question: str) -> bool:
function main (line 77) | async def main():
FILE: examples/agent_patterns/human_in_the_loop_custom_rejection.py
function tool_error_formatter (line 24) | async def tool_error_formatter(args: ToolErrorFormatterArgs[None]) -> st...
function publish_announcement (line 33) | async def publish_announcement(title: str, body: str) -> str:
function _find_formatter_output (line 38) | def _find_formatter_output(result: object) -> str | None:
function main (line 52) | async def main() -> None:
FILE: examples/agent_patterns/human_in_the_loop_stream.py
function _needs_temperature_approval (line 17) | async def _needs_temperature_approval(_ctx, params, _call_id) -> bool:
function get_temperature (line 26) | async def get_temperature(city: str) -> str:
function get_weather (line 39) | async def get_weather(city: str) -> str:
function confirm (line 51) | async def confirm(question: str) -> bool:
function main (line 63) | async def main():
FILE: examples/agent_patterns/input_guardrails.py
class MathHomeworkOutput (line 33) | class MathHomeworkOutput(BaseModel):
function math_guardrail (line 46) | async def math_guardrail(
function main (line 64) | async def main():
FILE: examples/agent_patterns/llm_as_a_judge.py
class EvaluationFeedback (line 26) | class EvaluationFeedback:
function main (line 42) | async def main() -> None:
FILE: examples/agent_patterns/output_guardrails.py
class MessageOutput (line 31) | class MessageOutput(BaseModel):
function sensitive_data_check (line 38) | async def sensitive_data_check(
function main (line 61) | async def main():
FILE: examples/agent_patterns/parallelization.py
function main (line 22) | async def main():
FILE: examples/agent_patterns/routing.py
function main (line 37) | async def main():
FILE: examples/agent_patterns/streaming_guardrails.py
class GuardrailOutput (line 29) | class GuardrailOutput(BaseModel):
function check_guardrail (line 49) | async def check_guardrail(text: str) -> GuardrailOutput:
function main (line 54) | async def main():
FILE: examples/auto_mode.py
function is_auto_mode (line 14) | def is_auto_mode() -> bool:
function input_with_fallback (line 19) | def input_with_fallback(prompt: str, fallback: str) -> str:
function confirm_with_fallback (line 27) | def confirm_with_fallback(prompt: str, default: bool = True) -> bool:
FILE: examples/basic/agent_lifecycle_example.py
class CustomAgentHooks (line 19) | class CustomAgentHooks(AgentHooks):
method __init__ (line 20) | def __init__(self, display_name: str):
method on_start (line 24) | async def on_start(self, context: AgentHookContext, agent: Agent) -> N...
method on_end (line 31) | async def on_end(self, context: RunContextWrapper, agent: Agent, outpu...
method on_handoff (line 37) | async def on_handoff(self, context: RunContextWrapper, agent: Agent, s...
method on_tool_start (line 47) | async def on_tool_start(self, context: RunContextWrapper, agent: Agent...
method on_tool_end (line 53) | async def on_tool_end(
function random_number (line 66) | def random_number(max: int) -> int:
function multiply_by_two (line 83) | def multiply_by_two(x: int) -> int:
class FinalResult (line 88) | class FinalResult(BaseModel):
function main (line 110) | async def main() -> None:
FILE: examples/basic/dynamic_system_prompt.py
class CustomContext (line 10) | class CustomContext:
function custom_instructions (line 14) | def custom_instructions(
function main (line 32) | async def main():
FILE: examples/basic/hello_world.py
function main (line 6) | async def main():
FILE: examples/basic/hello_world_gpt_5.py
function main (line 15) | async def main():
FILE: examples/basic/hello_world_gpt_oss.py
function main (line 24) | async def main():
FILE: examples/basic/image_tool_output.py
function fetch_random_image (line 11) | def fetch_random_image() -> ToolOutputImage | ToolOutputImageDict:
function main (line 21) | async def main():
FILE: examples/basic/lifecycle_example.py
class LoggingHooks (line 23) | class LoggingHooks(AgentHooks[Any]):
method on_start (line 24) | async def on_start(
method on_end (line 32) | async def on_end(
class ExampleHooks (line 41) | class ExampleHooks(RunHooks):
method __init__ (line 42) | def __init__(self):
method _usage_to_str (line 45) | def _usage_to_str(self, usage: Usage) -> str:
method on_agent_start (line 48) | async def on_agent_start(self, context: AgentHookContext, agent: Agent...
method on_llm_start (line 55) | async def on_llm_start(
method on_llm_end (line 65) | async def on_llm_end(
method on_agent_end (line 71) | async def on_agent_end(self, context: RunContextWrapper, agent: Agent,...
method on_tool_start (line 81) | async def on_tool_start(self, context: RunContextWrapper, agent: Agent...
method on_tool_end (line 90) | async def on_tool_end(
method on_handoff (line 101) | async def on_handoff(
function random_number (line 116) | def random_number(max: int) -> int:
function multiply_by_two (line 122) | def multiply_by_two(x: int) -> int:
class FinalResult (line 127) | class FinalResult(BaseModel):
function main (line 149) | async def main() -> None:
FILE: examples/basic/local_file.py
function file_to_base64 (line 10) | def file_to_base64(file_path: str) -> str:
function main (line 15) | async def main():
FILE: examples/basic/local_image.py
function image_to_base64 (line 10) | def image_to_base64(image_path):
function main (line 16) | async def main():
FILE: examples/basic/non_strict_output_type.py
class OutputType (line 22) | class OutputType:
class CustomOutputSchema (line 27) | class CustomOutputSchema(AgentOutputSchemaBase):
method is_plain_text (line 30) | def is_plain_text(self) -> bool:
method name (line 33) | def name(self) -> str:
method json_schema (line 36) | def json_schema(self) -> dict[str, Any]:
method is_strict_json_schema (line 42) | def is_strict_json_schema(self) -> bool:
method validate_json (line 45) | def validate_json(self, json_str: str) -> Any:
function main (line 51) | async def main():
FILE: examples/basic/previous_response_id.py
function main (line 18) | async def main():
function main_stream (line 38) | async def main_stream():
FILE: examples/basic/prompt_template.py
class DynamicContext (line 24) | class DynamicContext:
method __init__ (line 25) | def __init__(self, prompt_id: str):
function _get_dynamic_prompt (line 31) | async def _get_dynamic_prompt(data: GenerateDynamicPromptData):
function dynamic_prompt (line 42) | async def dynamic_prompt(prompt_id: str):
function static_prompt (line 54) | async def static_prompt(prompt_id: str):
FILE: examples/basic/remote_image.py
function main (line 8) | async def main():
FILE: examples/basic/remote_pdf.py
function main (line 8) | async def main():
FILE: examples/basic/retry.py
function format_error (line 15) | def format_error(error: object) -> str:
function main (line 21) | async def main() -> None:
FILE: examples/basic/retry_litellm.py
function format_error (line 15) | def format_error(error: object) -> str:
function main (line 21) | async def main() -> None:
FILE: examples/basic/stream_function_call_args.py
function write_file (line 10) | def write_file(filename: Annotated[str, "Name of the file"], content: st...
function create_config (line 16) | def create_config(
function main (line 25) | async def main():
FILE: examples/basic/stream_items.py
function how_many_jokes (line 8) | def how_many_jokes() -> int:
function main (line 13) | async def main():
FILE: examples/basic/stream_text.py
function main (line 8) | async def main():
FILE: examples/basic/stream_ws.py
function lookup_order (line 39) | def lookup_order(order_id: str) -> dict[str, Any]:
function submit_refund (line 73) | def submit_refund(order_id: str, amount: float, reason: str) -> dict[str...
function ask_approval (line 85) | def ask_approval(question: str) -> bool:
function run_streamed_turn (line 90) | async def run_streamed_turn(
function main (line 162) | async def main() -> None:
FILE: examples/basic/tool_guardrails.py
function send_email (line 18) | def send_email(to: str, subject: str, body: str) -> str:
function get_user_data (line 24) | def get_user_data(user_id: str) -> dict[str, str]:
function get_contact_info (line 37) | def get_contact_info(user_id: str) -> dict[str, str]:
function reject_sensitive_words (line 48) | def reject_sensitive_words(data: ToolInputGuardrailData) -> ToolGuardrai...
function block_sensitive_output (line 77) | def block_sensitive_output(data: ToolOutputGuardrailData) -> ToolGuardra...
function reject_phone_numbers (line 92) | def reject_phone_numbers(data: ToolOutputGuardrailData) -> ToolGuardrail...
function main (line 115) | async def main():
FILE: examples/basic/tools.py
class Weather (line 9) | class Weather(BaseModel):
function get_weather (line 16) | def get_weather(city: Annotated[str, "The city to get the weather for"])...
function main (line 29) | async def main():
FILE: examples/basic/usage_tracking.py
class Weather (line 8) | class Weather(BaseModel):
function get_weather (line 15) | def get_weather(city: str) -> Weather:
function print_usage (line 20) | def print_usage(usage: Usage) -> None:
function main (line 30) | async def main() -> None:
FILE: examples/customer_service/main.py
class AirlineAgentContext (line 29) | class AirlineAgentContext(BaseModel):
function faq_lookup_tool (line 42) | async def faq_lookup_tool(question: str) -> str:
function update_seat (line 68) | async def update_seat(
function on_seat_booking_handoff (line 89) | async def on_seat_booking_handoff(context: RunContextWrapper[AirlineAgen...
function main (line 143) | async def main():
FILE: examples/financial_research_agent/agents/financials_agent.py
class AnalysisSummary (line 14) | class AnalysisSummary(BaseModel):
FILE: examples/financial_research_agent/agents/planner_agent.py
class FinancialSearchItem (line 17) | class FinancialSearchItem(BaseModel):
class FinancialSearchPlan (line 25) | class FinancialSearchPlan(BaseModel):
FILE: examples/financial_research_agent/agents/risk_agent.py
class AnalysisSummary (line 13) | class AnalysisSummary(BaseModel):
FILE: examples/financial_research_agent/agents/verifier_agent.py
class VerificationResult (line 14) | class VerificationResult(BaseModel):
FILE: examples/financial_research_agent/agents/writer_agent.py
class FinancialReportData (line 16) | class FinancialReportData(BaseModel):
FILE: examples/financial_research_agent/main.py
function main (line 12) | async def main() -> None:
FILE: examples/financial_research_agent/manager.py
function _summary_extractor (line 20) | async def _summary_extractor(run_result: RunResult | RunResultStreaming)...
class FinancialResearchManager (line 27) | class FinancialResearchManager:
method __init__ (line 32) | def __init__(self) -> None:
method run (line 36) | async def run(self, query: str) -> None:
method _plan_searches (line 64) | async def _plan_searches(self, query: str) -> FinancialSearchPlan:
method _perform_searches (line 74) | async def _perform_searches(self, search_plan: FinancialSearchPlan) ->...
method _search (line 103) | async def _search(self, item: FinancialSearchItem) -> str | None:
method _write_report (line 111) | async def _write_report(self, query: str, search_results: Sequence[str...
method _verify_report (line 143) | async def _verify_report(self, report: FinancialReportData) -> Verific...
FILE: examples/financial_research_agent/printer.py
class Printer (line 8) | class Printer:
method __init__ (line 14) | def __init__(self, console: Console) -> None:
method end (line 20) | def end(self) -> None:
method hide_done_checkmark (line 23) | def hide_done_checkmark(self, item_id: str) -> None:
method update_item (line 26) | def update_item(
method mark_item_done (line 34) | def mark_item_done(self, item_id: str) -> None:
method flush (line 38) | def flush(self) -> None:
FILE: examples/handoffs/message_filter.py
function random_number_tool (line 12) | def random_number_tool(max: int) -> int:
function spanish_handoff_message_filter (line 17) | def spanish_handoff_message_filter(handoff_message_data: HandoffInputDat...
function main (line 66) | async def main():
FILE: examples/handoffs/message_filter_streaming.py
function random_number_tool (line 12) | def random_number_tool(max: int) -> int:
function spanish_handoff_message_filter (line 17) | def spanish_handoff_message_filter(handoff_message_data: HandoffInputDat...
function main (line 66) | async def main():
FILE: examples/hosted_mcp/connectors.py
function main (line 13) | async def main(verbose: bool, stream: bool):
FILE: examples/hosted_mcp/human_in_the_loop.py
function prompt_for_interruption (line 10) | def prompt_for_interruption(
function _drain_stream (line 31) | async def _drain_stream(
function main (line 45) | async def main(verbose: bool, stream: bool) -> None:
FILE: examples/hosted_mcp/on_approval.py
function prompt_approval (line 18) | def prompt_approval(request: MCPToolApprovalRequest) -> MCPToolApprovalF...
function main (line 30) | async def main(verbose: bool, stream: bool) -> None:
FILE: examples/hosted_mcp/simple.py
function main (line 10) | async def main(verbose: bool, stream: bool, repo: str):
FILE: examples/mcp/filesystem_example/main.py
function run (line 9) | async def run(mcp_server: MCPServer):
function main (line 35) | async def main():
FILE: examples/mcp/get_all_mcp_tools_example/main.py
function list_tools (line 13) | async def list_tools(server: MCPServer, *, convert_to_strict: bool) -> l...
function prompt_user_approval (line 27) | def prompt_user_approval(interruption_name: str) -> bool:
function resolve_interruptions (line 43) | async def resolve_interruptions(agent: Agent, result: Any) -> Any:
function main (line 60) | async def main():
FILE: examples/mcp/git_example/main.py
function run (line 9) | async def run(mcp_server: MCPServer, directory_path: str):
function main (line 29) | async def main():
FILE: examples/mcp/manager_example/app.py
class AddRequest (line 18) | class AddRequest(BaseModel):
class RunRequest (line 23) | class RunRequest(BaseModel):
class ReconnectRequest (line 27) | class ReconnectRequest(BaseModel):
function lifespan (line 32) | async def lifespan(app: FastAPI):
function health (line 59) | async def health() -> dict[str, object]:
function list_tools (line 75) | async def list_tools() -> dict[str, object]:
function add (line 84) | async def add(req: AddRequest) -> dict[str, object]:
function run_agent (line 93) | async def run_agent(req: RunRequest) -> dict[str, object]:
function reconnect (line 112) | async def reconnect(req: ReconnectRequest) -> dict[str, object]:
function _get_active_servers (line 120) | def _get_active_servers() -> list[MCPServer]:
FILE: examples/mcp/manager_example/mcp_server.py
function add (line 16) | def add(a: int, b: int) -> int:
function echo (line 21) | def echo(message: str) -> str:
FILE: examples/mcp/prompt_server/main.py
function _choose_port (line 16) | def _choose_port() -> int:
function get_instructions_from_prompt (line 31) | async def get_instructions_from_prompt(mcp_server: MCPServer, prompt_nam...
function demo_code_review (line 49) | async def demo_code_review(mcp_server: MCPServer):
function show_available_prompts (line 82) | async def show_available_prompts(mcp_server: MCPServer):
function main (line 93) | async def main():
FILE: examples/mcp/prompt_server/server.py
function generate_code_review_instructions (line 14) | def generate_code_review_instructions(
FILE: examples/mcp/sse_example/main.py
function run (line 13) | async def run(mcp_server: MCPServer):
function main (line 40) | async def main():
FILE: examples/mcp/sse_example/server.py
function add (line 10) | def add(a: int, b: int) -> int:
function get_secret_word (line 17) | def get_secret_word() -> str:
function get_current_weather (line 23) | def get_current_weather(city: str) -> str:
FILE: examples/mcp/sse_remote_example/main.py
function main (line 7) | async def main():
FILE: examples/mcp/streamable_http_remote_example/main.py
function main (line 7) | async def main():
FILE: examples/mcp/streamablehttp_custom_client_example/main.py
function _choose_port (line 24) | def _choose_port() -> int:
function create_custom_http_client (line 39) | def create_custom_http_client(
function run_with_custom_client (line 71) | async def run_with_custom_client(mcp_server: MCPServer):
function main (line 87) | async def main():
FILE: examples/mcp/streamablehttp_custom_client_example/server.py
function add (line 14) | def add(a: int, b: int) -> int:
function get_secret_word (line 21) | def get_secret_word() -> str:
FILE: examples/mcp/streamablehttp_example/main.py
function _choose_port (line 16) | def _choose_port() -> int:
function run (line 31) | async def run(mcp_server: MCPServer):
function main (line 58) | async def main():
FILE: examples/mcp/streamablehttp_example/server.py
function add (line 15) | def add(a: int, b: int) -> int:
function get_secret_word (line 22) | def get_secret_word() -> str:
function get_current_weather (line 28) | def get_current_weather(city: str) -> str:
FILE: examples/mcp/tool_filter_example/main.py
function run_with_auto_approval (line 11) | async def run_with_auto_approval(agent: Agent[Any], message: str) -> str...
function main (line 24) | async def main():
FILE: examples/memory/advanced_sqlite_session_example.py
function get_weather (line 16) | async def get_weather(city: str) -> str:
function main (line 22) | async def main():
FILE: examples/memory/compaction_session_example.py
function main (line 14) | async def main():
FILE: examples/memory/compaction_session_stateless_example.py
function main (line 13) | async def main():
FILE: examples/memory/dapr_session_example.py
function ping_with_retry (line 87) | async def ping_with_retry(
function main (line 102) | async def main():
function demonstrate_advanced_features (line 230) | async def demonstrate_advanced_features():
function setup_instructions (line 299) | async def setup_instructions():
function demonstrate_multi_store (line 377) | async def demonstrate_multi_store():
function _write_text_file (line 435) | def _write_text_file(path: Path, content: str, overwrite: bool) -> None:
function _docker_available (line 441) | def _docker_available() -> bool:
function _container_running (line 445) | def _container_running(name: str):
function _ensure_container (line 462) | def _ensure_container(name: str, run_args: list[str]) -> None:
function setup_environment (line 482) | def setup_environment(components_dir: str = "./components", overwrite: b...
FILE: examples/memory/encrypted_session_example.py
function main (line 17) | async def main():
FILE: examples/memory/file_hitl_example.py
function main (line 22) | async def main() -> None:
function create_lookup_customer_profile_tool (line 119) | def create_lookup_customer_profile_tool(
function format_tool_arguments (line 135) | def format_tool_arguments(interruption: Any) -> str:
function prompt_yes_no (line 147) | async def prompt_yes_no(question: str) -> bool:
FILE: examples/memory/file_session.py
class FileSession (line 20) | class FileSession(Session):
method __init__ (line 25) | def __init__(self, *, dir: str | Path | None = None, session_id: str |...
method _ensure_session_id (line 31) | async def _ensure_session_id(self) -> str:
method get_session_id (line 42) | async def get_session_id(self) -> str:
method get_items (line 46) | async def get_items(self, limit: int | None = None) -> list[Any]:
method add_items (line 53) | async def add_items(self, items: list[Any]) -> None:
method pop_item (line 62) | async def pop_item(self) -> Any | None:
method clear_session (line 71) | async def clear_session(self) -> None:
method _items_path (line 86) | def _items_path(self, session_id: str) -> Path:
method _state_path (line 89) | def _state_path(self, session_id: str) -> Path:
method _read_items (line 92) | async def _read_items(self, session_id: str) -> list[Any]:
method _write_items (line 101) | async def _write_items(self, session_id: str, items: list[Any]) -> None:
method load_state_json (line 107) | async def load_state_json(self) -> dict[str, Any] | None:
method save_state_json (line 118) | async def save_state_json(self, state: dict[str, Any]) -> None:
FILE: examples/memory/hitl_session_scenario.py
function tool_output_for (line 31) | def tool_output_for(name: str, message: str) -> str:
function approval_echo (line 44) | def approval_echo(query: str) -> str:
function approval_note (line 54) | def approval_note(query: str) -> str:
class ScenarioStep (line 60) | class ScenarioStep:
function run_scenario_step (line 68) | async def run_scenario_step(
function run_file_session_scenario (line 144) | async def run_file_session_scenario(*, model: str | Model | None = None)...
function run_openai_session_scenario (line 207) | async def run_openai_session_scenario(*, model: str | Model | None = Non...
function get_conversation_id (line 273) | async def get_conversation_id(session: OpenAIConversationsSession) -> str:
function get_user_text (line 277) | def get_user_text(item: TResponseInputItem) -> str | None:
function get_item_type (line 294) | def get_item_type(item: TResponseInputItem) -> str:
function is_function_call (line 300) | def is_function_call(item: TResponseInputItem) -> bool:
function is_function_call_output (line 304) | def is_function_call_output(item: TResponseInputItem) -> bool:
function find_last_item (line 308) | def find_last_item(items: list[TResponseInputItem], predicate: Any) -> d...
function extract_call_id (line 316) | def extract_call_id(item: dict[str, Any]) -> str | None:
function cast_str (line 320) | def cast_str(value: Any) -> str | None:
function log_session_summary (line 324) | def log_session_summary(items: list[TResponseInputItem], label: str) -> ...
function format_output (line 360) | def format_output(output: Any) -> str:
function truncate_text (line 378) | def truncate_text(text: str, max_length: int = 140) -> str:
function main (line 387) | async def main() -> None:
FILE: examples/memory/memory_session_hitl_example.py
function _needs_approval (line 15) | async def _needs_approval(_ctx, _params, _call_id) -> bool:
function get_weather (line 21) | def get_weather(location: str) -> str:
function prompt_yes_no (line 44) | async def prompt_yes_no(question: str) -> bool:
function main (line 56) | async def main():
FILE: examples/memory/openai_session_example.py
function main (line 13) | async def main():
FILE: examples/memory/openai_session_hitl_example.py
function _needs_approval (line 15) | async def _needs_approval(_ctx, _params, _call_id) -> bool:
function get_weather (line 21) | def get_weather(location: str) -> str:
function prompt_yes_no (line 44) | async def prompt_yes_no(question: str) -> bool:
function main (line 56) | async def main():
FILE: examples/memory/redis_session_example.py
function main (line 17) | async def main():
function demonstrate_advanced_features (line 149) | async def demonstrate_advanced_features():
FILE: examples/memory/sqlalchemy_session_example.py
function main (line 7) | async def main():
FILE: examples/memory/sqlite_session_example.py
function main (line 13) | async def main():
FILE: examples/model_providers/custom_example_agent.py
function get_weather (line 36) | def get_weather(city: str):
function main (line 41) | async def main():
FILE: examples/model_providers/custom_example_global.py
function get_weather (line 45) | def get_weather(city: str):
function main (line 50) | async def main():
FILE: examples/model_providers/custom_example_provider.py
class CustomModelProvider (line 43) | class CustomModelProvider(ModelProvider):
method get_model (line 44) | def get_model(self, model_name: str | None) -> Model:
function get_weather (line 52) | def get_weather(city: str):
function main (line 57) | async def main():
FILE: examples/model_providers/litellm_auto.py
function get_weather (line 20) | def get_weather(city: str):
class Result (line 25) | class Result(BaseModel):
function main (line 30) | async def main():
FILE: examples/model_providers/litellm_provider.py
function get_weather (line 22) | def get_weather(city: str):
function main (line 27) | async def main(model: str, api_key: str):
FILE: examples/realtime/app/agent.py
function faq_lookup_tool (line 17) | async def faq_lookup_tool(question: str) -> str:
function update_seat (line 42) | async def update_seat(confirmation_number: str, new_seat: str) -> str:
function get_weather (line 54) | def get_weather(city: str) -> str:
function get_starting_agent (line 100) | def get_starting_agent() -> RealtimeAgent:
FILE: examples/realtime/app/server.py
class RealtimeWebSocketManager (line 38) | class RealtimeWebSocketManager:
method __init__ (line 39) | def __init__(self):
method connect (line 44) | async def connect(self, websocket: WebSocket, session_id: str):
method disconnect (line 73) | async def disconnect(self, session_id: str):
method send_audio (line 82) | async def send_audio(self, session_id: str, audio_bytes: bytes):
method send_client_event (line 86) | async def send_client_event(self, session_id: str, event: dict[str, An...
method send_user_message (line 100) | async def send_user_message(self, session_id: str, message: RealtimeUs...
method approve_tool_call (line 107) | async def approve_tool_call(self, session_id: str, call_id: str, *, al...
method reject_tool_call (line 114) | async def reject_tool_call(self, session_id: str, call_id: str, *, alw...
method interrupt (line 121) | async def interrupt(self, session_id: str) -> None:
method _process_events (line 128) | async def _process_events(self, session_id: str):
method _sanitize_history_item (line 140) | def _sanitize_history_item(self, item: RealtimeItem) -> dict[str, Any]:
method _serialize_event (line 157) | async def _serialize_event(self, event: RealtimeSessionEvent) -> dict[...
function lifespan (line 215) | async def lifespan(app: FastAPI):
function websocket_endpoint (line 223) | async def websocket_endpoint(websocket: WebSocket, session_id: str):
function read_index (line 383) | async def read_index():
FILE: examples/realtime/app/static/app.js
class RealtimeDemo (line 1) | class RealtimeDemo {
method constructor (line 2) | constructor() {
method initializeElements (line 26) | initializeElements() {
method setupEventListeners (line 38) | setupEventListeners() {
method generateSessionId (line 99) | generateSessionId() {
method connect (line 103) | async connect() {
method disconnect (line 132) | disconnect() {
method updateConnectionUI (line 139) | updateConnectionUI() {
method toggleMute (line 155) | toggleMute() {
method updateMuteUI (line 160) | updateMuteUI() {
method readFileAsDataURL (line 173) | readFileAsDataURL(file) {
method prepareDataURL (line 182) | async prepareDataURL(file) {
method startContinuousCapture (line 211) | async startContinuousCapture() {
method stopContinuousCapture (line 267) | stopContinuousCapture() {
method handleRealtimeEvent (line 296) | handleRealtimeEvent(event) {
method updateLastMessageFromHistory (line 334) | updateLastMessageFromHistory(history) {
method syncMissingFromHistory (line 385) | syncMissingFromHistory(history) {
method addMessageFromItem (line 397) | addMessageFromItem(item) {
method addMessage (line 439) | addMessage(type, content) {
method addImageMessage (line 454) | addImageMessage(role, imageUrl, caption = '') {
method addUserImageMessage (line 483) | addUserImageMessage(imageUrl, caption = '') {
method addRawEvent (line 487) | addRawEvent(event) {
method addToolEvent (line 516) | addToolEvent(event) {
method promptForToolApproval (line 562) | promptForToolApproval(event) {
method playAudio (line 582) | async playAudio(audioBase64) {
method ensurePlaybackNode (line 605) | async ensurePlaybackNode() {
method flushPendingPlaybackChunks (line 649) | flushPendingPlaybackChunks() {
method decodeBase64ToInt16 (line 672) | decodeBase64ToInt16(audioBase64) {
method stopAudioPlayback (line 687) | stopAudioPlayback() {
method scrollToBottom (line 705) | scrollToBottom() {
FILE: examples/realtime/app/static/audio-playback.worklet.js
class PCMPlaybackProcessor (line 1) | class PCMPlaybackProcessor extends AudioWorkletProcessor {
method constructor (line 2) | constructor() {
method reset (line 55) | reset() {
method hasPendingAudio (line 62) | hasPendingAudio() {
method pullSample (line 69) | pullSample() {
method process (line 92) | process(inputs, outputs) {
FILE: examples/realtime/app/static/audio-recorder.worklet.js
class PCMRecorderProcessor (line 1) | class PCMRecorderProcessor extends AudioWorkletProcessor {
method constructor (line 2) | constructor() {
method flushBuffer (line 11) | flushBuffer() {
method process (line 24) | process(inputs) {
FILE: examples/realtime/cli/demo.py
function get_weather (line 38) | def get_weather(city: str) -> str:
function _truncate_str (line 50) | def _truncate_str(s: str, max_length: int) -> str:
class NoUIDemo (line 56) | class NoUIDemo:
method __init__ (line 57) | def __init__(self) -> None:
method _output_callback (line 84) | def _output_callback(self, outdata, frames: int, time, status) -> None:
method run (line 208) | async def run(self) -> None:
method start_audio_recording (line 257) | async def start_audio_recording(self) -> None:
method capture_audio (line 272) | async def capture_audio(self) -> None:
method _on_event (line 324) | async def _on_event(self, event: RealtimeSessionEvent) -> None:
method _compute_rms (line 362) | def _compute_rms(self, samples: np.ndarray[Any, np.dtype[Any]]) -> float:
method _update_playback_rms (line 369) | def _update_playback_rms(self, samples: np.ndarray[Any, np.dtype[Any]]...
FILE: examples/realtime/twilio/server.py
class TwilioWebSocketManager (line 21) | class TwilioWebSocketManager:
method __init__ (line 22) | def __init__(self):
method new_session (line 25) | async def new_session(self, websocket: WebSocket) -> TwilioHandler:
function root (line 40) | async def root():
function incoming_call (line 46) | async def incoming_call(request: Request):
function media_stream_endpoint (line 61) | async def media_stream_endpoint(websocket: WebSocket):
FILE: examples/realtime/twilio/twilio_handler.py
function get_weather (line 24) | def get_weather(city: str) -> str:
function get_current_time (line 30) | def get_current_time() -> str:
class TwilioHandler (line 45) | class TwilioHandler:
method __init__ (line 46) | def __init__(self, twilio_websocket: WebSocket):
method start (line 85) | async def start(self) -> None:
method wait_until_done (line 123) | async def wait_until_done(self) -> None:
method _realtime_session_loop (line 128) | async def _realtime_session_loop(self) -> None:
method _twilio_message_loop (line 137) | async def _twilio_message_loop(self) -> None:
method _handle_realtime_event (line 149) | async def _handle_realtime_event(self, event: RealtimeSessionEvent) ->...
method _handle_twilio_message (line 194) | async def _handle_twilio_message(self, message: dict[str, Any]) -> None:
method _handle_media_event (line 214) | async def _handle_media_event(self, message: dict[str, Any]) -> None:
method _handle_mark_event (line 234) | async def _handle_mark_event(self, message: dict[str, Any]) -> None:
method _flush_audio_buffer (line 252) | async def _flush_audio_buffer(self) -> None:
method _buffer_flush_loop (line 284) | async def _buffer_flush_loop(self) -> None:
FILE: examples/realtime/twilio_sip/agents.py
function faq_lookup_tool (line 20) | async def faq_lookup_tool(question: str) -> str:
function update_customer_record (line 36) | async def update_customer_record(customer_id: str, note: str) -> str:
function get_starting_agent (line 84) | def get_starting_agent() -> RealtimeAgent:
FILE: examples/realtime/twilio_sip/server.py
function _get_env (line 32) | def _get_env(name: str) -> str:
function accept_call (line 53) | async def accept_call(call_id: str) -> None:
function observe_call (line 99) | async def observe_call(call_id: str) -> None:
function _track_call_task (line 174) | def _track_call_task(call_id: str) -> None:
function openai_webhook (line 191) | async def openai_webhook(request: Request) -> Response:
function healthcheck (line 210) | async def healthcheck() -> dict[str, str]:
FILE: examples/reasoning_content/gpt_oss_stream.py
function main (line 29) | async def main():
FILE: examples/reasoning_content/main.py
function stream_with_reasoning_content (line 27) | async def stream_with_reasoning_content():
function get_response_with_reasoning_content (line 70) | async def get_response_with_reasoning_content():
function main (line 117) | async def main():
FILE: examples/reasoning_content/runner_example.py
function main (line 23) | async def main():
FILE: examples/research_bot/agents/planner_agent.py
class WebSearchItem (line 12) | class WebSearchItem(BaseModel):
class WebSearchPlan (line 20) | class WebSearchPlan(BaseModel):
FILE: examples/research_bot/agents/writer_agent.py
class ReportData (line 18) | class ReportData(BaseModel):
FILE: examples/research_bot/main.py
function main (line 8) | async def main() -> None:
FILE: examples/research_bot/manager.py
class ResearchManager (line 16) | class ResearchManager:
method __init__ (line 17) | def __init__(self):
method run (line 21) | async def run(self, query: str) -> None:
method _plan_searches (line 52) | async def _plan_searches(self, query: str) -> WebSearchPlan:
method _perform_searches (line 65) | async def _perform_searches(self, search_plan: WebSearchPlan) -> list[...
method _search (line 94) | async def _search(self, item: WebSearchItem) -> str | None:
method _write_report (line 105) | async def _write_report(self, query: str, search_results: list[str]) -...
FILE: examples/research_bot/printer.py
class Printer (line 8) | class Printer:
method __init__ (line 9) | def __init__(self, console: Console):
method end (line 15) | def end(self) -> None:
method hide_done_checkmark (line 18) | def hide_done_checkmark(self, item_id: str) -> None:
method update_item (line 21) | def update_item(
method mark_item_done (line 29) | def mark_item_done(self, item_id: str) -> None:
method flush (line 33) | def flush(self) -> None:
FILE: examples/run_examples.py
class ExampleScript (line 54) | class ExampleScript:
method relpath (line 59) | def relpath(self) -> str:
method module (line 63) | def module(self) -> str:
method command (line 68) | def command(self) -> list[str]:
class ExampleResult (line 74) | class ExampleResult:
function normalize_relpath (line 82) | def normalize_relpath(relpath: str) -> str:
function parse_args (line 87) | def parse_args() -> argparse.Namespace:
function detect_tags (line 176) | def detect_tags(path: Path, source: str) -> set[str]:
function discover_examples (line 208) | def discover_examples(filters: Iterable[str]) -> list[ExampleScript]:
function should_skip (line 235) | def should_skip(
function format_command (line 249) | def format_command(cmd: Sequence[str]) -> str:
function display_path (line 253) | def display_path(path: Path) -> str:
function env_flag (line 260) | def env_flag(name: str) -> bool | None:
function load_auto_skip (line 267) | def load_auto_skip() -> set[str]:
function write_main_log_line (line 275) | def write_main_log_line(handle, line: str) -> None:
function ensure_dirs (line 280) | def ensure_dirs(path: Path, is_file: bool | None = None) -> None:
function parse_rerun_from_log (line 294) | def parse_rerun_from_log(log_path: Path) -> list[str]:
function run_examples (line 312) | def run_examples(examples: Sequence[ExampleScript], args: argparse.Names...
function main (line 533) | def main() -> int:
FILE: examples/tools/apply_patch.py
class ApprovalTracker (line 13) | class ApprovalTracker:
method __init__ (line 14) | def __init__(self) -> None:
method fingerprint (line 17) | def fingerprint(self, operation: ApplyPatchOperation, relative_path: s...
method remember (line 26) | def remember(self, fingerprint: str) -> None:
method is_approved (line 29) | def is_approved(self, fingerprint: str) -> bool:
class WorkspaceEditor (line 33) | class WorkspaceEditor:
method __init__ (line 34) | def __init__(self, root: Path, approvals: ApprovalTracker, auto_approv...
method create_file (line 39) | def create_file(self, operation: ApplyPatchOperation) -> ApplyPatchRes...
method update_file (line 48) | def update_file(self, operation: ApplyPatchOperation) -> ApplyPatchRes...
method delete_file (line 58) | def delete_file(self, operation: ApplyPatchOperation) -> ApplyPatchRes...
method _relative_path (line 65) | def _relative_path(self, value: str) -> str:
method _resolve (line 69) | def _resolve(self, relative: str, ensure_parent: bool = False) -> Path:
method _require_approval (line 81) | def _require_approval(self, operation: ApplyPatchOperation, display_pa...
function main (line 99) | async def main(auto_approve: bool, model: str) -> None:
FILE: examples/tools/code_interpreter.py
function _get_field (line 8) | def _get_field(obj: Any, key: str) -> Any:
function main (line 14) | async def main():
FILE: examples/tools/codex.py
function on_codex_stream (line 34) | async def on_codex_stream(payload: CodexToolStreamEvent) -> None:
function _timestamp (line 93) | def _timestamp() -> str:
function log (line 97) | def log(message: str) -> None:
function main (line 104) | async def main() -> None:
FILE: examples/tools/codex_same_thread.py
function on_codex_stream (line 25) | async def on_codex_stream(payload: CodexToolStreamEvent) -> None:
function _timestamp (line 44) | def _timestamp() -> str:
function log (line 48) | def log(message: str) -> None:
function read_context_value (line 55) | def read_context_value(context: Mapping[str, str] | BaseModel, key: str)...
function main (line 62) | async def main() -> None:
FILE: examples/tools/computer_use.py
class LocalPlaywrightComputer (line 58) | class LocalPlaywrightComputer(AsyncComputer):
method __init__ (line 61) | def __init__(self):
method _get_browser_and_page (line 66) | async def _get_browser_and_page(self) -> tuple[Browser, Page]:
method __aenter__ (line 75) | async def __aenter__(self):
method __aexit__ (line 81) | async def __aexit__(self, exc_type, exc_val, exc_tb):
method open (line 88) | async def open(self) -> "LocalPlaywrightComputer":
method close (line 93) | async def close(self) -> None:
method playwright (line 98) | def playwright(self) -> Playwright:
method browser (line 103) | def browser(self) -> Browser:
method page (line 108) | def page(self) -> Page:
method dimensions (line 113) | def dimensions(self) -> tuple[int, int]:
method screenshot (line 116) | async def screenshot(self) -> str:
method click (line 121) | async def click(self, x: int, y: int, button: Button = "left") -> None:
method double_click (line 130) | async def double_click(self, x: int, y: int) -> None:
method scroll (line 133) | async def scroll(self, x: int, y: int, scroll_x: int, scroll_y: int) -...
method type (line 137) | async def type(self, text: str) -> None:
method wait (line 140) | async def wait(self) -> None:
method move (line 143) | async def move(self, x: int, y: int) -> None:
method keypress (line 146) | async def keypress(self, keys: list[str]) -> None:
method drag (line 153) | async def drag(self, path: list[tuple[int, int]]) -> None:
function run_agent (line 163) | async def run_agent(
function singleton_computer (line 178) | async def singleton_computer() -> None:
function computer_per_request (line 184) | async def computer_per_request() -> None:
FILE: examples/tools/container_shell_inline_skill.py
function build_skill_zip_bundle (line 18) | def build_skill_zip_bundle() -> bytes:
function build_inline_skill (line 28) | def build_inline_skill() -> ShellToolInlineSkill:
function extract_container_id (line 42) | def extract_container_id(raw_responses: list[ModelResponse]) -> str | None:
function main (line 53) | async def main(model: str) -> None:
FILE: examples/tools/container_shell_skill_reference.py
function resolve_skill_reference (line 20) | def resolve_skill_reference() -> ShellToolSkillReference:
function extract_container_id (line 32) | def extract_container_id(raw_responses: list[ModelResponse]) -> str | None:
function main (line 43) | async def main(model: str) -> None:
FILE: examples/tools/file_search.py
function main (line 8) | async def main():
FILE: examples/tools/image_generator.py
function _get_field (line 14) | def _get_field(obj: Any, key: str) -> Any:
function open_file (line 20) | def open_file(path: str) -> None:
function main (line 31) | async def main():
FILE: examples/tools/local_shell_skill.py
function build_local_skill (line 12) | def build_local_skill() -> ShellToolLocalSkill:
function main (line 20) | async def main(model: str) -> None:
FILE: examples/tools/shell.py
class ShellExecutor (line 25) | class ShellExecutor:
method __init__ (line 28) | def __init__(self, cwd: Path | None = None):
method __call__ (line 31) | async def __call__(self, request: ShellCommandRequest) -> ShellResult:
function prompt_shell_approval (line 77) | async def prompt_shell_approval(commands: Sequence[str]) -> bool:
function main (line 88) | async def main(prompt: str, model: str) -> None:
FILE: examples/tools/shell_human_in_the_loop.py
class ShellExecutor (line 22) | class ShellExecutor:
method __init__ (line 25) | def __init__(self, cwd: Path | None = None):
method __call__ (line 28) | async def __call__(self, request: ShellCommandRequest) -> ShellResult:
function prompt_shell_approval (line 74) | async def prompt_shell_approval(commands: Sequence[str]) -> tuple[bool, ...
function _extract_commands (line 90) | def _extract_commands(approval_item: ToolApprovalItem) -> Sequence[str]:
function main (line 104) | async def main(prompt: str, model: str) -> None:
FILE: examples/tools/tool_search.py
function get_customer_profile (line 46) | def get_customer_profile(
function list_open_orders (line 54) | def list_open_orders(
function get_invoice_status (line 62) | def get_invoice_status(
function get_shipping_eta (line 70) | def get_shipping_eta(
function get_shipping_credit_balance (line 78) | def get_shipping_credit_balance(
function loaded_paths (line 121) | def loaded_paths(result: Any) -> list[str]:
function print_result (line 163) | def print_result(title: str, result: Any, registered_paths: list[str]) -...
function run_namespaced_example (line 181) | async def run_namespaced_example() -> None:
function run_top_level_example (line 193) | async def run_top_level_example() -> None:
function main (line 205) | async def main() -> None:
FILE: examples/tools/web_search.py
function main (line 6) | async def main():
FILE: examples/tools/web_search_filters.py
function _get_field (line 13) | def _get_field(obj: Any, key: str) -> Any:
function _normalized_source_urls (line 23) | def _normalized_source_urls(sources: Any) -> list[str]:
function main (line 70) | async def main():
FILE: examples/voice/static/main.py
function get_weather (line 34) | def get_weather(city: str) -> str:
class WorkflowCallbacks (line 61) | class WorkflowCallbacks(SingleAgentWorkflowCallbacks):
method on_run (line 62) | def on_run(self, workflow: SingleAgentVoiceWorkflow, transcription: st...
function main (line 66) | async def main():
FILE: examples/voice/static/util.py
function _record_audio (line 9) | def _record_audio(screen: curses.window) -> npt.NDArray[np.float32]:
function record_audio (line 50) | def record_audio():
class AudioPlayer (line 58) | class AudioPlayer:
method __enter__ (line 59) | def __enter__(self):
method __exit__ (line 64) | def __exit__(self, exc_type, exc_value, traceback):
method add_audio (line 68) | def add_audio(self, audio_data: npt.NDArray[np.int16]):
FILE: examples/voice/streamed/main.py
class Header (line 36) | class Header(Static):
method render (line 42) | def render(self) -> str:
class AudioStatusIndicator (line 46) | class AudioStatusIndicator(Static):
method render (line 52) | def render(self) -> str:
class RealtimeApp (line 61) | class RealtimeApp(App[None]):
method __init__ (line 124) | def __init__(self) -> None:
method _on_transcription (line 139) | def _on_transcription(self, transcription: str) -> None:
method compose (line 146) | def compose(self) -> ComposeResult:
method on_mount (line 153) | async def on_mount(self) -> None:
method start_voice_pipeline (line 157) | async def start_voice_pipeline(self) -> None:
method send_mic_audio (line 177) | async def send_mic_audio(self) -> None:
method on_key (line 211) | async def on_key(self, event: events.Key) -> None:
FILE: examples/voice/streamed/my_workflow.py
function get_weather (line 11) | def get_weather(city: str) -> str:
class MyWorkflow (line 38) | class MyWorkflow(VoiceWorkflowBase):
method __init__ (line 39) | def __init__(self, secret_word: str, on_start: Callable[[str], None]):
method run (line 51) | async def run(self, transcription: str) -> AsyncIterator[str]:
FILE: src/agents/__init__.py
function set_default_openai_key (line 228) | def set_default_openai_key(key: str, use_for_tracing: bool = True) -> None:
function set_default_openai_client (line 243) | def set_default_openai_client(client: AsyncOpenAI, use_for_tracing: bool...
function set_default_openai_api (line 256) | def set_default_openai_api(api: Literal["chat_completions", "responses"]...
function set_default_openai_responses_transport (line 263) | def set_default_openai_responses_transport(transport: Literal["http", "w...
function enable_verbose_stdout_logging (line 272) | def enable_verbose_stdout_logging():
FILE: src/agents/_config.py
function set_default_openai_key (line 8) | def set_default_openai_key(key: str, use_for_tracing: bool) -> None:
function set_default_openai_client (line 15) | def set_default_openai_client(client: AsyncOpenAI, use_for_tracing: bool...
function set_default_openai_api (line 22) | def set_default_openai_api(api: Literal["chat_completions", "responses"]...
function set_default_openai_responses_transport (line 29) | def set_default_openai_responses_transport(transport: Literal["http", "w...
FILE: src/agents/_debug.py
function _debug_flag_enabled (line 4) | def _debug_flag_enabled(flag: str, default: bool = False) -> bool:
function _load_dont_log_model_data (line 12) | def _load_dont_log_model_data() -> bool:
function _load_dont_log_tool_data (line 16) | def _load_dont_log_tool_data() -> bool:
FILE: src/agents/_mcp_tool_metadata.py
class MCPToolMetadata (line 9) | class MCPToolMetadata:
function _get_mapping_or_attr (line 16) | def _get_mapping_or_attr(value: Any, key: str) -> Any:
function _get_non_empty_string (line 22) | def _get_non_empty_string(value: Any) -> str | None:
function resolve_mcp_tool_title (line 28) | def resolve_mcp_tool_title(tool: Any) -> str | None:
function resolve_mcp_tool_description (line 38) | def resolve_mcp_tool_description(tool: Any) -> str | None:
function resolve_mcp_tool_description_for_model (line 43) | def resolve_mcp_tool_description_for_model(tool: Any) -> str:
function extract_mcp_tool_metadata (line 54) | def extract_mcp_tool_metadata(tool: Any) -> MCPToolMetadata:
function collect_mcp_list_tools_metadata (line 62) | def collect_mcp_list_tools_metadata(items: Iterable[Any]) -> dict[tuple[...
FILE: src/agents/_tool_identity.py
class SerializedFunctionToolLookupKey (line 21) | class SerializedFunctionToolLookupKey(TypedDict, total=False):
function get_mapping_or_attr (line 29) | def get_mapping_or_attr(value: Any, key: str) -> Any:
function tool_qualified_name (line 36) | def tool_qualified_name(name: str | None, namespace: str | None = None) ...
function tool_trace_name (line 45) | def tool_trace_name(name: str | None, namespace: str | None = None) -> s...
function is_reserved_synthetic_tool_namespace (line 52) | def is_reserved_synthetic_tool_namespace(name: str | None, namespace: st...
function get_tool_call_namespace (line 63) | def get_tool_call_namespace(tool_call: Any) -> str | None:
function get_tool_call_name (line 69) | def get_tool_call_name(tool_call: Any) -> str | None:
function get_tool_call_qualified_name (line 75) | def get_tool_call_qualified_name(tool_call: Any) -> str | None:
function get_function_tool_lookup_key (line 83) | def get_function_tool_lookup_key(
function get_function_tool_lookup_key_for_call (line 97) | def get_function_tool_lookup_key_for_call(tool_call: Any) -> FunctionToo...
function get_function_tool_lookup_key_for_tool (line 105) | def get_function_tool_lookup_key_for_tool(tool: Any) -> FunctionToolLook...
function serialize_function_tool_lookup_key (line 115) | def serialize_function_tool_lookup_key(
function deserialize_function_tool_lookup_key (line 135) | def deserialize_function_tool_lookup_key(data: Any) -> FunctionToolLooku...
function get_tool_call_trace_name (line 156) | def get_tool_call_trace_name(tool_call: Any) -> str | None:
function get_tool_trace_name_for_tool (line 164) | def get_tool_trace_name_for_tool(tool: Any) -> str | None:
function _remove_tool_call_namespace (line 174) | def _remove_tool_call_namespace(tool_call: Any) -> Any:
function has_function_tool_shape (line 194) | def has_function_tool_shape(tool: Any) -> bool:
function get_function_tool_public_name (line 201) | def get_function_tool_public_name(tool: Any) -> str | None:
function get_function_tool_namespace (line 209) | def get_function_tool_namespace(tool: Any) -> str | None:
function get_explicit_function_tool_namespace (line 214) | def get_explicit_function_tool_namespace(tool: Any) -> str | None:
function get_function_tool_namespace_description (line 222) | def get_function_tool_namespace_description(tool: Any) -> str | None:
function is_deferred_top_level_function_tool (line 228) | def is_deferred_top_level_function_tool(tool: Any) -> bool:
function get_function_tool_dispatch_name (line 237) | def get_function_tool_dispatch_name(tool: Any) -> str | None:
function get_function_tool_lookup_keys (line 245) | def get_function_tool_lookup_keys(tool: Any) -> tuple[FunctionToolLookup...
function should_allow_bare_name_approval_alias (line 266) | def should_allow_bare_name_approval_alias(tool: Any, all_tools: Sequence...
function get_deferred_top_level_function_tool_lookup_key (line 284) | def get_deferred_top_level_function_tool_lookup_key(
function validate_function_tool_namespace_shape (line 294) | def validate_function_tool_namespace_shape(
function validate_function_tool_lookup_configuration (line 310) | def validate_function_tool_lookup_configuration(tools: Sequence[Any]) ->...
function build_function_tool_lookup_map (line 352) | def build_function_tool_lookup_map(tools: Sequence[Any]) -> dict[Functio...
function get_function_tool_approval_keys (line 362) | def get_function_tool_approval_keys(
function normalize_tool_call_for_function_tool (line 413) | def normalize_tool_call_for_function_tool(tool_call: Any, tool: Any) -> ...
function get_function_tool_qualified_name (line 428) | def get_function_tool_qualified_name(tool: Any) -> str | None:
function get_function_tool_trace_name (line 433) | def get_function_tool_trace_name(tool: Any) -> str | None:
FILE: src/agents/agent.py
class ToolsToFinalOutputResult (line 74) | class ToolsToFinalOutputResult:
function _validate_codex_tool_name_collisions (line 95) | def _validate_codex_tool_name_collisions(tools: list[Tool]) -> None:
class AgentToolStreamEvent (line 121) | class AgentToolStreamEvent(TypedDict):
class StopAtTools (line 134) | class StopAtTools(TypedDict):
class MCPConfig (line 139) | class MCPConfig(TypedDict):
class AgentBase (line 155) | class AgentBase(Generic[TContext]):
method get_mcp_tools (line 183) | async def get_mcp_tools(self, run_context: RunContextWrapper[TContext]...
method get_all_tools (line 197) | async def get_all_tools(self, run_context: RunContextWrapper[TContext]...
class Agent (line 221) | class Agent(AgentBase, Generic[TContext]):
method __post_init__ (line 322) | def __post_init__(self):
method clone (line 455) | def clone(self, **kwargs: Any) -> Agent[TContext]:
method as_tool (line 470) | def as_tool(
method get_system_prompt (line 875) | async def get_system_prompt(self, run_context: RunContextWrapper[TCont...
method get_prompt (line 904) | async def get_prompt(
FILE: src/agents/agent_output.py
class AgentOutputSchemaBase (line 16) | class AgentOutputSchemaBase(abc.ABC):
method is_plain_text (line 22) | def is_plain_text(self) -> bool:
method name (line 27) | def name(self) -> str:
method json_schema (line 32) | def json_schema(self) -> dict[str, Any]:
method is_strict_json_schema (line 39) | def is_strict_json_schema(self) -> bool:
method validate_json (line 47) | def validate_json(self, json_str: str) -> Any:
class AgentOutputSchema (line 55) | class AgentOutputSchema(AgentOutputSchemaBase):
method __init__ (line 79) | def __init__(self, output_type: type[Any], strict_json_schema: bool = ...
method is_plain_text (line 122) | def is_plain_text(self) -> bool:
method is_strict_json_schema (line 126) | def is_strict_json_schema(self) -> bool:
method json_schema (line 130) | def json_schema(self) -> dict[str, Any]:
method validate_json (line 136) | def validate_json(self, json_str: str) -> Any:
method name (line 166) | def name(self) -> str:
function _is_subclass_of_base_model_or_dict (line 171) | def _is_subclass_of_base_model_or_dict(t: Any) -> bool:
function _type_to_str (line 183) | def _type_to_str(t: type[Any]) -> str:
FILE: src/agents/agent_tool_input.py
class AgentAsToolInput (line 21) | class AgentAsToolInput(BaseModel):
class StructuredInputSchemaInfo (line 28) | class StructuredInputSchemaInfo:
class StructuredToolInputBuilderOptions (line 35) | class StructuredToolInputBuilderOptions(TypedDict, total=False):
function default_tool_input_builder (line 50) | def default_tool_input_builder(options: StructuredToolInputBuilderOption...
function resolve_agent_tool_input (line 79) | async def resolve_agent_tool_input(
function build_structured_input_schema_info (line 110) | def build_structured_input_schema_info(
function is_agent_tool_input (line 123) | def is_agent_tool_input(value: Any) -> bool:
function _has_only_input_field (line 128) | def _has_only_input_field(value: dict[str, Any]) -> bool:
class _SchemaSummaryField (line 134) | class _SchemaSummaryField:
class _SchemaFieldDescription (line 142) | class _SchemaFieldDescription:
class _SchemaSummary (line 148) | class _SchemaSummary:
function _build_schema_summary (line 153) | def _build_schema_summary(parameters: dict[str, Any]) -> str | None:
function _format_schema_summary (line 160) | def _format_schema_summary(summary: _SchemaSummary) -> str:
function _summarize_json_schema (line 171) | def _summarize_json_schema(schema: dict[str, Any]) -> _SchemaSummary | N...
function _describe_json_schema_field (line 209) | def _describe_json_schema_field(
function _read_schema_description (line 248) | def _read_schema_description(value: Any) -> str | None:
function _format_enum_label (line 257) | def _format_enum_label(values: list[Any] | None) -> str:
function _format_literal_label (line 265) | def _format_literal_label(schema: dict[str, Any]) -> str:
FILE: src/agents/agent_tool_state.py
function get_agent_tool_state_scope (line 30) | def get_agent_tool_state_scope(context: Any) -> str | None:
function set_agent_tool_state_scope (line 36) | def set_agent_tool_state_scope(context: Any, scope_id: str | None) -> None:
function _tool_call_signature (line 52) | def _tool_call_signature(
function _scoped_tool_call_signature (line 66) | def _scoped_tool_call_signature(
function _index_agent_tool_run_result (line 73) | def _index_agent_tool_run_result(
function _drop_agent_tool_run_result (line 85) | def _drop_agent_tool_run_result(tool_call_obj_id: int) -> None:
function _register_tool_call_ref (line 107) | def _register_tool_call_ref(tool_call: ResponseFunctionToolCall, tool_ca...
function record_agent_tool_run_result (line 119) | def record_agent_tool_run_result(
function _tool_call_obj_matches_scope (line 132) | def _tool_call_obj_matches_scope(tool_call_obj_id: int, *, scope_id: str...
function consume_agent_tool_run_result (line 140) | def consume_agent_tool_run_result(
function peek_agent_tool_run_result (line 167) | def peek_agent_tool_run_result(
function drop_agent_tool_run_result (line 190) | def drop_agent_tool_run_result(
FILE: src/agents/apply_diff.py
class Chunk (line 14) | class Chunk:
class ParserState (line 21) | class ParserState:
class ParsedUpdateDiff (line 28) | class ParsedUpdateDiff:
class ReadSectionResult (line 34) | class ReadSectionResult:
function apply_diff (line 52) | def apply_diff(input: str, diff: str, mode: ApplyDiffMode = "default") -...
function _normalize_diff_lines (line 68) | def _normalize_diff_lines(diff: str) -> list[str]:
function _detect_newline_from_text (line 75) | def _detect_newline_from_text(text: str) -> str:
function _detect_newline (line 79) | def _detect_newline(input: str, diff: str, mode: ApplyDiffMode) -> str:
function _normalize_text_newlines (line 87) | def _normalize_text_newlines(text: str) -> str:
function _is_done (line 92) | def _is_done(state: ParserState, prefixes: Sequence[str]) -> bool:
function _read_str (line 100) | def _read_str(state: ParserState, prefix: str) -> str:
function _parse_create_diff (line 110) | def _parse_create_diff(lines: list[str], newline: str) -> str:
function _parse_update_diff (line 126) | def _parse_update_diff(lines: list[str], input: str) -> ParsedUpdateDiff:
function _advance_cursor_to_anchor (line 171) | def _advance_cursor_to_anchor(
function _read_section (line 197) | def _read_section(lines: list[str], start_index: int) -> ReadSectionResult:
class ContextMatch (line 276) | class ContextMatch:
function _find_context (line 281) | def _find_context(lines: list[str], context: list[str], start: int, eof:...
function _find_context_core (line 292) | def _find_context_core(lines: list[str], context: list[str], start: int)...
function _equals_slice (line 309) | def _equals_slice(
function _apply_chunks (line 320) | def _apply_chunks(input: str, chunks: list[Chunk], newline: str) -> str:
FILE: src/agents/computer.py
class Computer (line 8) | class Computer(abc.ABC):
method environment (line 13) | def environment(self) -> Environment | None:
method dimensions (line 18) | def dimensions(self) -> tuple[int, int] | None:
method screenshot (line 23) | def screenshot(self) -> str:
method click (line 27) | def click(self, x: int, y: int, button: Button) -> None:
method double_click (line 31) | def double_click(self, x: int, y: int) -> None:
method scroll (line 35) | def scroll(self, x: int, y: int, scroll_x: int, scroll_y: int) -> None:
method type (line 39) | def type(self, text: str) -> None:
method wait (line 43) | def wait(self) -> None:
method move (line 47) | def move(self, x: int, y: int) -> None:
method keypress (line 51) | def keypress(self, keys: list[str]) -> None:
method drag (line 55) | def drag(self, path: list[tuple[int, int]]) -> None:
class AsyncComputer (line 59) | class AsyncComputer(abc.ABC):
method environment (line 64) | def environment(self) -> Environment | None:
method dimensions (line 69) | def dimensions(self) -> tuple[int, int] | None:
method screenshot (line 74) | async def screenshot(self) -> str:
method click (line 78) | async def click(self, x: int, y: int, button: Button) -> None:
method double_click (line 82) | async def double_click(self, x: int, y: int) -> None:
method scroll (line 86) | async def scroll(self, x: int, y: int, scroll_x: int, scroll_y: int) -...
method type (line 90) | async def type(self, text: str) -> None:
method wait (line 94) | async def wait(self) -> None:
method move (line 98) | async def move(self, x: int, y: int) -> None:
method keypress (line 102) | async def keypress(self, keys: list[str]) -> None:
method drag (line 106) | async def drag(self, path: list[tuple[int, int]]) -> None:
FILE: src/agents/editor.py
class ApplyPatchOperation (line 16) | class ApplyPatchOperation:
class ApplyPatchResult (line 26) | class ApplyPatchResult:
class ApplyPatchEditor (line 34) | class ApplyPatchEditor(Protocol):
method create_file (line 37) | def create_file(
method update_file (line 41) | def update_file(
method delete_file (line 45) | def delete_file(
FILE: src/agents/exceptions.py
class RunErrorDetails (line 21) | class RunErrorDetails:
method __str__ (line 32) | def __str__(self) -> str:
class AgentsException (line 36) | class AgentsException(Exception):
method __init__ (line 41) | def __init__(self, *args: object) -> None:
class MaxTurnsExceeded (line 46) | class MaxTurnsExceeded(AgentsException):
method __init__ (line 51) | def __init__(self, message: str):
class ModelBehaviorError (line 56) | class ModelBehaviorError(AgentsException):
method __init__ (line 63) | def __init__(self, message: str):
class UserError (line 68) | class UserError(AgentsException):
method __init__ (line 73) | def __init__(self, message: str):
class MCPToolCancellationError (line 78) | class MCPToolCancellationError(AgentsException):
method __init__ (line 83) | def __init__(self, message: str):
class ToolTimeoutError (line 88) | class ToolTimeoutError(AgentsException):
method __init__ (line 94) | def __init__(self, tool_name: str, timeout_seconds: float):
class InputGuardrailTripwireTriggered (line 100) | class InputGuardrailTripwireTriggered(AgentsException):
method __init__ (line 106) | def __init__(self, guardrail_result: InputGuardrailResult):
class OutputGuardrailTripwireTriggered (line 113) | class OutputGuardrailTripwireTriggered(AgentsException):
method __init__ (line 119) | def __init__(self, guardrail_result: OutputGuardrailResult):
class ToolInputGuardrailTripwireTriggered (line 126) | class ToolInputGuardrailTripwireTriggered(AgentsException):
method __init__ (line 135) | def __init__(self, guardrail: ToolInputGuardrail[Any], output: ToolGua...
class ToolOutputGuardrailTripwireTriggered (line 141) | class ToolOutputGuardrailTripwireTriggered(AgentsException):
method __init__ (line 150) | def __init__(self, guardrail: ToolOutputGuardrail[Any], output: ToolGu...
FILE: src/agents/extensions/experimental/codex/codex.py
class _UnsetType (line 14) | class _UnsetType:
class Codex (line 21) | class Codex:
method __init__ (line 23) | def __init__(self, options: CodexOptions | Mapping[str, Any] | None = ...
method __init__ (line 26) | def __init__(
method __init__ (line 36) | def __init__(
method start_thread (line 69) | def start_thread(self, options: ThreadOptions | Mapping[str, Any] | No...
method resume_thread (line 77) | def resume_thread(
function _normalize_env (line 89) | def _normalize_env(options: CodexOptions) -> dict[str, str] | None:
FILE: src/agents/extensions/experimental/codex/codex_options.py
class CodexOptions (line 11) | class CodexOptions:
function coerce_codex_options (line 24) | def coerce_codex_options(
FILE: src/agents/extensions/experimental/codex/codex_tool.py
class CodexToolInputItem (line 77) | class CodexToolInputItem(BaseModel):
method validate_item (line 85) | def validate_item(self) -> CodexToolInputItem:
class CodexToolParameters (line 107) | class CodexToolParameters(BaseModel):
method validate_thread_id (line 126) | def validate_thread_id(self) -> CodexToolParameters:
class CodexToolRunContextParameters (line 138) | class CodexToolRunContextParameters(BaseModel):
class OutputSchemaPrimitive (line 150) | class OutputSchemaPrimitive(TypedDict, total=False):
class OutputSchemaArray (line 156) | class OutputSchemaArray(TypedDict, total=False):
class OutputSchemaPropertyDescriptor (line 165) | class OutputSchemaPropertyDescriptor(TypedDict, total=False):
class OutputSchemaDescriptor (line 171) | class OutputSchemaDescriptor(TypedDict, total=False):
class CodexToolResult (line 179) | class CodexToolResult:
method as_dict (line 184) | def as_dict(self) -> dict[str, Any]:
method __str__ (line 191) | def __str__(self) -> str:
class CodexToolStreamEvent (line 196) | class CodexToolStreamEvent(_DictLike):
class CodexToolOptions (line 203) | class CodexToolOptions:
class CodexToolCallArguments (line 225) | class CodexToolCallArguments(TypedDict):
class _UnsetType (line 230) | class _UnsetType:
function codex_tool (line 237) | def codex_tool(
function _coerce_tool_options (line 431) | def _coerce_tool_options(
function _validate_run_context_thread_id_key (line 459) | def _validate_run_context_thread_id_key(value: Any) -> str:
function _resolve_codex_tool_name (line 470) | def _resolve_codex_tool_name(configured_name: str | None) -> str:
function _resolve_run_context_thread_id_key (line 490) | def _resolve_run_context_thread_id_key(
function _normalize_name_for_context_key (line 507) | def _normalize_name_for_context_key(value: str) -> str:
function _validate_default_run_context_thread_id_suffix (line 514) | def _validate_default_run_context_thread_id_suffix(value: str) -> str:
function _parse_tool_input (line 533) | def _parse_tool_input(parameters_model: type[BaseModel], input_json: str...
function _normalize_parameters (line 549) | def _normalize_parameters(params: BaseModel) -> CodexToolCallArguments:
function _build_codex_input (line 570) | def _build_codex_input(args: CodexToolCallArguments) -> Input:
function _resolve_codex_options (line 576) | def _resolve_codex_options(
function _resolve_default_codex_api_key (line 599) | def _resolve_default_codex_api_key(options: CodexOptions | None) -> str ...
function _create_codex_resolver (line 623) | def _create_codex_resolver(
function _resolve_thread_options (line 644) | def _resolve_thread_options(
function _build_turn_options (line 668) | def _build_turn_options(
function _resolve_output_schema (line 687) | def _resolve_output_schema(
function _looks_like_descriptor (line 707) | def _looks_like_descriptor(option: Mapping[str, Any]) -> bool:
function _validate_descriptor (line 714) | def _validate_descriptor(option: Mapping[str, Any]) -> OutputSchemaDescr...
function _is_valid_field (line 743) | def _is_valid_field(field: Any) -> bool:
function _build_codex_output_schema (line 760) | def _build_codex_output_schema(descriptor: OutputSchemaDescriptor) -> di...
function _build_codex_output_schema_field (line 786) | def _build_codex_output_schema_field(field: OutputSchemaField) -> dict[s...
function _get_thread (line 803) | def _get_thread(codex: Codex, thread_id: str | None, defaults: ThreadOpt...
function _normalize_thread_id (line 809) | def _normalize_thread_id(value: Any) -> str | None:
function _resolve_call_thread_id (line 821) | def _resolve_call_thread_id(
function _read_thread_id_from_run_context (line 840) | def _read_thread_id_from_run_context(ctx: RunContextWrapper[Any], key: s...
function _validate_run_context_thread_id_context (line 862) | def _validate_run_context_thread_id_context(ctx: RunContextWrapper[Any],...
function _store_thread_id_in_run_context (line 913) | def _store_thread_id_in_run_context(
function _try_store_thread_id_in_run_context_after_error (line 944) | def _try_store_thread_id_in_run_context_after_error(
function _set_pydantic_context_value (line 960) | def _set_pydantic_context_value(context: BaseModel, key: str, value: str...
function _get_or_create_persisted_thread (line 989) | def _get_or_create_persisted_thread(
function _to_agent_usage (line 1008) | def _to_agent_usage(usage: Usage) -> AgentsUsage:
function _consume_events (line 1019) | async def _consume_events(
function _handle_item_started (line 1118) | def _handle_item_started(
function _handle_item_updated (line 1182) | def _handle_item_updated(
function _handle_item_completed (line 1200) | def _handle_item_completed(
function _truncate_span_string (line 1237) | def _truncate_span_string(value: str, max_chars: int | None) -> str:
function _json_char_size (line 1252) | def _json_char_size(value: Any) -> int:
function _drop_empty_string_fields (line 1259) | def _drop_empty_string_fields(data: dict[str, Any]) -> dict[str, Any]:
function _stringify_span_value (line 1263) | def _stringify_span_value(value: Any) -> str:
function _maybe_as_dict (line 1274) | def _maybe_as_dict(value: Any) -> Any:
function _truncate_span_value (line 1284) | def _truncate_span_value(value: Any, max_chars: int | None) -> Any:
function _enforce_span_data_budget (line 1307) | def _enforce_span_data_budget(data: dict[str, Any], max_chars: int | Non...
function _merge_span_data (line 1422) | def _merge_span_data(
function _apply_span_updates (line 1431) | def _apply_span_updates(
function _update_command_span (line 1443) | def _update_command_span(
function _update_mcp_tool_span (line 1461) | def _update_mcp_tool_span(
function _update_reasoning_span (line 1478) | def _update_reasoning_span(span: Any, item: ReasoningItem, span_data_max...
function _build_default_response (line 1486) | def _build_default_response(args: CodexToolCallArguments) -> str:
function _is_command_execution_item (line 1491) | def _is_command_execution_item(item: ThreadItem) -> TypeGuard[CommandExe...
function _is_mcp_tool_call_item (line 1495) | def _is_mcp_tool_call_item(item: ThreadItem) -> TypeGuard[McpToolCallItem]:
function _is_reasoning_item (line 1499) | def _is_reasoning_item(item: ThreadItem) -> TypeGuard[ReasoningItem]:
FILE: src/agents/extensions/experimental/codex/events.py
class ThreadStartedEvent (line 16) | class ThreadStartedEvent(_DictLike):
class TurnStartedEvent (line 22) | class TurnStartedEvent(_DictLike):
class Usage (line 27) | class Usage(_DictLike):
class TurnCompletedEvent (line 34) | class TurnCompletedEvent(_DictLike):
class ThreadError (line 40) | class ThreadError(_DictLike):
class TurnFailedEvent (line 45) | class TurnFailedEvent(_DictLike):
class ItemStartedEvent (line 51) | class ItemStartedEvent(_DictLike):
class ItemUpdatedEvent (line 57) | class ItemUpdatedEvent(_DictLike):
class ItemCompletedEvent (line 63) | class ItemCompletedEvent(_DictLike):
class ThreadErrorEvent (line 69) | class ThreadErrorEvent(_DictLike):
class _UnknownThreadEvent (line 75) | class _UnknownThreadEvent(_DictLike):
function _coerce_thread_error (line 93) | def _coerce_thread_error(raw: ThreadError | Mapping[str, Any]) -> Thread...
function coerce_usage (line 101) | def coerce_usage(raw: Usage | Mapping[str, Any]) -> Usage:
function coerce_thread_event (line 113) | def coerce_thread_event(raw: ThreadEvent | Mapping[str, Any]) -> ThreadE...
FILE: src/agents/extensions/experimental/codex/exec.py
class CodexExecArgs (line 26) | class CodexExecArgs:
class CodexExec (line 47) | class CodexExec:
method __init__ (line 48) | def __init__(
method run (line 61) | async def run(self, args: CodexExecArgs) -> AsyncGenerator[str, None]:
method _build_env (line 211) | def _build_env(self, args: CodexExecArgs) -> dict[str, str]:
function _watch_signal (line 231) | async def _watch_signal(signal: asyncio.Event, process: asyncio.subproce...
function _platform_target_triple (line 237) | def _platform_target_triple() -> str:
function find_codex_path (line 261) | def find_codex_path() -> str:
function _resolve_subprocess_stream_limit_bytes (line 279) | def _resolve_subprocess_stream_limit_bytes(explicit_value: int | None) -...
function _validate_subprocess_stream_limit_bytes (line 296) | def _validate_subprocess_stream_limit_bytes(value: int) -> int:
FILE: src/agents/extensions/experimental/codex/items.py
class CommandExecutionItem (line 25) | class CommandExecutionItem(_DictLike):
class FileUpdateChange (line 35) | class FileUpdateChange(_DictLike):
class FileChangeItem (line 41) | class FileChangeItem(_DictLike):
class McpToolCallResult (line 49) | class McpToolCallResult(_DictLike):
class McpToolCallError (line 55) | class McpToolCallError(_DictLike):
class McpToolCallItem (line 60) | class McpToolCallItem(_DictLike):
class AgentMessageItem (line 72) | class AgentMessageItem(_DictLike):
class ReasoningItem (line 79) | class ReasoningItem(_DictLike):
class WebSearchItem (line 86) | class WebSearchItem(_DictLike):
class ErrorItem (line 93) | class ErrorItem(_DictLike):
class TodoItem (line 100) | class TodoItem(_DictLike):
class TodoListItem (line 106) | class TodoListItem(_DictLike):
class _UnknownThreadItem (line 113) | class _UnknownThreadItem(_DictLike):
function is_agent_message_item (line 132) | def is_agent_message_item(item: ThreadItem) -> TypeGuard[AgentMessageItem]:
function _coerce_file_update_change (line 136) | def _coerce_file_update_change(
function _coerce_mcp_tool_call_result (line 149) | def _coerce_mcp_tool_call_result(
function _coerce_mcp_tool_call_error (line 163) | def _coerce_mcp_tool_call_error(
function coerce_thread_item (line 173) | def coerce_thread_item(raw: ThreadItem | Mapping[str, Any]) -> ThreadItem:
FILE: src/agents/extensions/experimental/codex/output_schema_file.py
class OutputSchemaFile (line 14) | class OutputSchemaFile:
function _is_plain_json_object (line 20) | def _is_plain_json_object(schema: Any) -> bool:
function create_output_schema_file (line 24) | def create_output_schema_file(schema: dict[str, Any] | None) -> OutputSc...
FILE: src/agents/extensions/experimental/codex/payloads.py
class _DictLike (line 8) | class _DictLike:
method __getitem__ (line 9) | def __getitem__(self, key: str) -> Any:
method get (line 14) | def get(self, key: str, default: Any = None) -> Any:
method __contains__ (line 19) | def __contains__(self, key: object) -> bool:
method keys (line 24) | def keys(self) -> Iterable[str]:
method as_dict (line 27) | def as_dict(self) -> dict[str, Any]:
method _field_names (line 30) | def _field_names(self) -> list[str]:
FILE: src/agents/extensions/experimental/codex/thread.py
function _aclosing (line 31) | async def _aclosing(
class TextInput (line 40) | class TextInput(TypedDict):
class LocalImageInput (line 45) | class LocalImageInput(TypedDict):
class Turn (line 55) | class Turn:
class StreamedTurn (line 65) | class StreamedTurn:
class Thread (line 72) | class Thread:
method __init__ (line 73) | def __init__(
method id (line 87) | def id(self) -> str | None:
method run_streamed (line 90) | async def run_streamed(
method _run_streamed_internal (line 96) | async def _run_streamed_internal(
method run (line 162) | async def run(self, input: Input, turn_options: TurnOptions | None = N...
function _normalize_input (line 191) | def _normalize_input(input: Input) -> tuple[str, list[str]]:
function _parse_event (line 210) | def _parse_event(raw: str) -> ThreadEvent:
FILE: src/agents/extensions/experimental/codex/thread_options.py
class ThreadOptions (line 18) | class ThreadOptions:
function coerce_thread_options (line 41) | def coerce_thread_options(
FILE: src/agents/extensions/experimental/codex/turn_options.py
class TurnOptions (line 14) | class TurnOptions:
function coerce_turn_options (line 23) | def coerce_turn_options(
FILE: src/agents/extensions/handoff_filters.py
function remove_all_tools (line 29) | def remove_all_tools(handoff_input_data: HandoffInputData) -> HandoffInp...
function _remove_tools_from_items (line 49) | def _remove_tools_from_items(items: tuple[RunItem, ...]) -> tuple[RunIte...
function _remove_tool_types_from_input (line 66) | def _remove_tool_types_from_input(
FILE: src/agents/extensions/handoff_prompt.py
function prompt_with_handoff_instructions (line 15) | def prompt_with_handoff_instructions(prompt: str) -> str:
FILE: src/agents/extensions/memory/__init__.py
function __getattr__ (line 37) | def __getattr__(name: str) -> Any:
FILE: src/agents/extensions/memory/advanced_sqlite_session.py
class AdvancedSQLiteSession (line 20) | class AdvancedSQLiteSession(SQLiteSession):
method __init__ (line 23) | def __init__(
method _init_structure_tables (line 53) | def _init_structure_tables(self):
method add_items (line 125) | async def add_items(self, items: list[TResponseInputItem]) -> None:
method get_items (line 138) | async def get_items(
method store_run_usage (line 249) | async def store_run_usage(self, result: RunResult) -> None:
method _get_next_turn_number (line 267) | def _get_next_turn_number(self, branch_id: str) -> int:
method _get_next_branch_turn_number (line 290) | def _get_next_branch_turn_number(self, branch_id: str) -> int:
method _get_current_turn_number (line 313) | def _get_current_turn_number(self) -> int:
method _add_structure_metadata (line 332) | async def _add_structure_metadata(self, items: list[TResponseInputItem...
method _cleanup_orphaned_messages (line 444) | async def _cleanup_orphaned_messages(self) -> int:
method _classify_message_type (line 488) | def _classify_message_type(self, item: TResponseInputItem) -> str:
method _extract_tool_name (line 506) | def _extract_tool_name(self, item: TResponseInputItem) -> str | None:
method _is_user_message (line 560) | def _is_user_message(self, item: TResponseInputItem) -> bool:
method create_branch_from_turn (line 571) | async def create_branch_from_turn(
method create_branch_from_content (line 637) | async def create_branch_from_content(
method switch_to_branch (line 660) | async def switch_to_branch(self, branch_id: str) -> None:
method delete_branch (line 693) | async def delete_branch(self, branch_id: str, force: bool = False) -> ...
method list_branches (line 773) | async def list_branches(self) -> list[dict[str, Any]]:
method _copy_messages_to_new_branch (line 821) | async def _copy_messages_to_new_branch(self, new_branch_id: str, from_...
method get_conversation_turns (line 905) | async def get_conversation_turns(self, branch_id: str | None = None) -...
method find_turns_by_content (line 962) | async def find_turns_by_content(
method get_conversation_by_turns (line 1018) | async def get_conversation_by_turns(
method get_tool_usage (line 1056) | async def get_tool_usage(self, branch_id: str | None = None) -> list[t...
method get_session_usage (line 1114) | async def get_session_usage(self, branch_id: str | None = None) -> dic...
method get_turn_usage (line 1174) | async def get_turn_usage(
method _update_turn_usage_internal (line 1280) | async def _update_turn_usage_internal(self, user_turn_number: int, usa...
FILE: src/agents/extensions/memory/async_sqlite_session.py
class AsyncSQLiteSession (line 17) | class AsyncSQLiteSession(SessionABC):
method __init__ (line 27) | def __init__(
method _init_db_for_connection (line 51) | async def _init_db_for_connection(self, conn: aiosqlite.Connection) ->...
method _get_connection (line 85) | async def _get_connection(self) -> aiosqlite.Connection:
method _locked_connection (line 99) | async def _locked_connection(self) -> AsyncIterator[aiosqlite.Connecti...
method get_items (line 105) | async def get_items(self, limit: int | None = None) -> list[TResponseI...
method add_items (line 153) | async def add_items(self, items: list[TResponseInputItem]) -> None:
method pop_item (line 189) | async def pop_item(self) -> TResponseInputItem | None:
method clear_session (line 223) | async def clear_session(self) -> None:
method close (line 236) | async def close(self) -> None:
FILE: src/agents/extensions/memory/dapr_session.py
class DaprSession (line 57) | class DaprSession(SessionABC):
method __init__ (line 62) | def __init__(
method from_address (line 101) | def from_address(
method _get_read_metadata (line 141) | def _get_read_metadata(self) -> dict[str, str]:
method _get_state_options (line 152) | def _get_state_options(self, *, concurrency: Concurrency | None = None...
method _get_metadata (line 165) | def _get_metadata(self) -> dict[str, str]:
method _serialize_item (line 172) | async def _serialize_item(self, item: TResponseInputItem) -> str:
method _deserialize_item (line 176) | async def _deserialize_item(self, item: str) -> TResponseInputItem:
method _decode_messages (line 180) | def _decode_messages(self, data: bytes | None) -> list[Any]:
method _calculate_retry_delay (line 192) | def _calculate_retry_delay(self, attempt: int) -> float:
method _is_concurrency_conflict (line 198) | def _is_concurrency_conflict(self, error: Exception) -> bool:
method _handle_concurrency_conflict (line 221) | async def _handle_concurrency_conflict(self, error: Exception, attempt...
method get_items (line 235) | async def get_items(self, limit: int | None = None) -> list[TResponseI...
method add_items (line 274) | async def add_items(self, items: list[TResponseInputItem]) -> None:
method pop_item (line 327) | async def pop_item(self) -> TResponseInputItem | None:
method clear_session (line 371) | async def clear_session(self) -> None:
method close (line 387) | async def close(self) -> None:
method __aenter__ (line 397) | async def __aenter__(self) -> DaprSession:
method __aexit__ (line 401) | async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
method ping (line 405) | async def ping(self) -> bool:
FILE: src/agents/extensions/memory/encrypt_session.py
class EncryptedEnvelope (line 44) | class EncryptedEnvelope(TypedDict):
function _ensure_fernet_key_bytes (line 53) | def _ensure_fernet_key_bytes(master_key: str) -> bytes:
function _derive_session_fernet_key (line 69) | def _derive_session_fernet_key(master_key_bytes: bytes, session_id: str)...
function _to_json_bytes (line 80) | def _to_json_bytes(obj: Any) -> bytes:
function _from_json_bytes (line 84) | def _from_json_bytes(data: bytes) -> Any:
function _is_encrypted_envelope (line 88) | def _is_encrypted_envelope(item: object) -> TypeGuard[EncryptedEnvelope]:
class EncryptedSession (line 99) | class EncryptedSession(SessionABC):
method __init__ (line 113) | def __init__(
method __getattr__ (line 136) | def __getattr__(self, name):
method session_settings (line 140) | def session_settings(self) -> SessionSettings | None:
method session_settings (line 145) | def session_settings(self, value: SessionSettings | None) -> None:
method _wrap (line 149) | def _wrap(self, item: TResponseInputItem) -> EncryptedEnvelope:
method _unwrap (line 162) | def _unwrap(self, item: TResponseInputItem | EncryptedEnvelope) -> TRe...
method get_items (line 173) | async def get_items(self, limit: int | None = None) -> list[TResponseI...
method add_items (line 182) | async def add_items(self, items: list[TResponseInputItem]) -> None:
method pop_item (line 186) | async def pop_item(self) -> TResponseInputItem | None:
method clear_session (line 195) | async def clear_session(self) -> None:
FILE: src/agents/extensions/memory/redis_session.py
class RedisSession (line 42) | class RedisSession(SessionABC):
method __init__ (line 47) | def __init__(
method from_url (line 82) | def from_url(
method _serialize_item (line 118) | async def _serialize_item(self, item: TResponseInputItem) -> str:
method _deserialize_item (line 122) | async def _deserialize_item(self, item: str) -> TResponseInputItem:
method _get_next_id (line 126) | async def _get_next_id(self) -> int:
method _set_ttl_if_configured (line 131) | async def _set_ttl_if_configured(self, *keys: str) -> None:
method get_items (line 143) | async def get_items(self, limit: int | None = None) -> list[TResponseI...
method add_items (line 182) | async def add_items(self, items: list[TResponseInputItem]) -> None:
method pop_item (line 224) | async def pop_item(self) -> TResponseInputItem | None:
method clear_session (line 248) | async def clear_session(self) -> None:
method close (line 258) | async def close(self) -> None:
method ping (line 268) | async def ping(self) -> bool:
FILE: src/agents/extensions/memory/sqlalchemy_session.py
class SQLAlchemySession (line 55) | class SQLAlchemySession(SessionABC):
method _get_table_init_lock (line 66) | def _get_table_init_lock(
method __init__ (line 81) | def __init__(
method from_url (line 168) | def from_url(
method _serialize_item (line 196) | async def _serialize_item(self, item: TResponseInputItem) -> str:
method _deserialize_item (line 200) | async def _deserialize_item(self, item: str) -> TResponseInputItem:
method _ensure_tables (line 207) | async def _ensure_tables(self) -> None:
method get_items (line 227) | async def get_items(self, limit: int | None = None) -> list[TResponseI...
method add_items (line 279) | async def add_items(self, items: list[TResponseInputItem]) -> None:
method pop_item (line 326) | async def pop_item(self) -> TResponseInputItem | None:
method clear_session (line 363) | async def clear_session(self) -> None:
method engine (line 376) | def engine(self) -> AsyncEngine:
FILE: src/agents/extensions/models/litellm_model.py
function _patch_litellm_serializer_warnings (line 62) | def _patch_litellm_serializer_warnings() -> None:
class InternalChatCompletionMessage (line 121) | class InternalChatCompletionMessage(ChatCompletionMessage):
class InternalToolCall (line 130) | class InternalToolCall(ChatCompletionMessageFunctionToolCall):
class LitellmModel (line 139) | class LitellmModel(Model):
method __init__ (line 145) | def __init__(
method get_retry_advice (line 157) | def get_retry_advice(self, request: ModelRetryAdviceRequest) -> ModelR...
method get_response (line 162) | async def get_response(
method stream_response (line 276) | async def stream_response(
method _fetch_response (line 339) | async def _fetch_response(
method _fetch_response (line 354) | async def _fetch_response(
method _fetch_response (line 368) | async def _fetch_response(
method _convert_gemini_extra_content_to_provider_specific_fields (line 562) | def _convert_gemini_extra_content_to_provider_specific_fields(
method _fix_tool_message_ordering (line 621) | def _fix_tool_message_ordering(
method _remove_not_given (line 736) | def _remove_not_given(self, value: Any) -> Any:
method _merge_headers (line 741) | def _merge_headers(self, model_settings: ModelSettings):
class LitellmConverter (line 745) | class LitellmConverter:
method convert_message_to_openai (line 747) | def convert_message_to_openai(
method convert_annotations_to_openai (line 813) | def convert_annotations_to_openai(
method convert_tool_call_to_openai (line 836) | def convert_tool_call_to_openai(
FILE: src/agents/extensions/models/litellm_provider.py
class LitellmProvider (line 9) | class LitellmProvider(ModelProvider):
method get_model (line 22) | def get_model(self, model_name: str | None) -> Model:
FILE: src/agents/extensions/tool_output_trimmer.py
class ToolOutputTrimmer (line 44) | class ToolOutputTrimmer:
method __post_init__ (line 70) | def __post_init__(self) -> None:
method __call__ (line 81) | def __call__(self, data: CallModelData[Any]) -> ModelInputData:
method _find_recent_boundary (line 147) | def _find_recent_boundary(self, items: list[Any]) -> int:
method _build_call_id_to_names (line 165) | def _build_call_id_to_names(self, items: list[Any]) -> dict[str, tuple...
method _trim_function_call_output (line 186) | def _trim_function_call_output(
method _trim_tool_search_output (line 212) | def _trim_tool_search_output(self, item: dict[str, Any]) -> tuple[dict...
method _trim_legacy_tool_search_results (line 234) | def _trim_legacy_tool_search_results(
method _trim_tool_search_tool (line 256) | def _trim_tool_search_tool(self, tool: Any) -> Any:
method _trim_json_schema (line 277) | def _trim_json_schema(self, schema: dict[str, Any]) -> dict[str, Any]:
method _serialize_json_like (line 294) | def _serialize_json_like(self, value: Any) -> str:
FILE: src/agents/extensions/visualization.py
function get_main_graph (line 9) | def get_main_graph(agent: Agent) -> str:
function get_all_nodes (line 33) | def get_all_nodes(
function get_all_edges (line 98) | def get_all_edges(
function draw_graph (line 147) | def draw_graph(agent: Agent, filename: str | None = None) -> graphviz.So...
FILE: src/agents/function_schema.py
class FuncSchema (line 21) | class FuncSchema:
method to_call_args (line 42) | def to_call_args(self, data: BaseModel) -> tuple[list[Any], dict[str, ...
class FuncDocumentation (line 78) | class FuncDocumentation:
function _detect_docstring_style (line 94) | def _detect_docstring_style(doc: str) -> DocstringStyle:
function _suppress_griffe_logging (line 135) | def _suppress_griffe_logging():
function generate_func_documentation (line 146) | def generate_func_documentation(
function _strip_annotated (line 188) | def _strip_annotated(annotation: Any) -> tuple[Any, tuple[Any, ...]]:
function _extract_description_from_metadata (line 204) | def _extract_description_from_metadata(metadata: tuple[Any, ...]) -> str...
function _extract_field_info_from_metadata (line 213) | def _extract_field_info_from_metadata(metadata: tuple[Any, ...]) -> Fiel...
function function_schema (line 222) | def function_schema(
FILE: src/agents/guardrail.py
class GuardrailFunctionOutput (line 20) | class GuardrailFunctionOutput:
class InputGuardrailResult (line 36) | class InputGuardrailResult:
class OutputGuardrailResult (line 49) | class OutputGuardrailResult:
class InputGuardrail (line 72) | class InputGuardrail(Generic[TContext]):
method get_name (line 105) | def get_name(self) -> str:
method run (line 111) | async def run(
class OutputGuardrail (line 134) | class OutputGuardrail(Generic[TContext]):
method get_name (line 159) | def get_name(self) -> str:
method run (line 165) | async def run(
function input_guardrail (line 202) | def input_guardrail(
function input_guardrail (line 208) | def input_guardrail(
function input_guardrail (line 214) | def input_guardrail(
function input_guardrail (line 224) | def input_guardrail(
function output_guardrail (line 284) | def output_guardrail(
function output_guardrail (line 290) | def output_guardrail(
function output_guardrail (line 296) | def output_guardrail(
function output_guardrail (line 305) | def output_guardrail(
FILE: src/agents/handoffs/__init__.py
class HandoffInputData (line 43) | class HandoffInputData:
method clone (line 73) | def clone(self, **kwargs: Any) -> HandoffInputData:
class Handoff (line 94) | class Handoff(Generic[TContext, TAgent]):
method get_transfer_message (line 162) | def get_transfer_message(self, agent: AgentBase[Any]) -> str:
method default_tool_name (line 166) | def default_tool_name(cls, agent: AgentBase[Any]) -> str:
method default_tool_description (line 170) | def default_tool_description(cls, agent: AgentBase[Any]) -> str:
function handoff (line 178) | def handoff(
function handoff (line 190) | def handoff(
function handoff (line 204) | def handoff(
function handoff (line 216) | def handoff(
FILE: src/agents/handoffs/history.py
function set_conversation_history_wrappers (line 40) | def set_conversation_history_wrappers(
function reset_conversation_history_wrappers (line 57) | def reset_conversation_history_wrappers() -> None:
function get_conversation_history_wrappers (line 65) | def get_conversation_history_wrappers() -> tuple[str, str]:
function nest_handoff_history (line 71) | def nest_handoff_history(
function default_handoff_history_mapper (line 115) | def default_handoff_history_mapper(
function _normalize_input_history (line 124) | def _normalize_input_history(
function _run_item_to_plain_input (line 132) | def _run_item_to_plain_input(run_item: RunItem) -> TResponseInputItem:
function _build_summary_message (line 136) | def _build_summary_message(transcript: list[TResponseInputItem]) -> TRes...
function _format_transcript_item (line 161) | def _format_transcript_item(item: TResponseInputItem) -> str:
function _stringify_content (line 180) | def _stringify_content(content: Any) -> str:
function _flatten_nested_history_messages (line 191) | def _flatten_nested_history_messages(
function _extract_nested_history_transcript (line 204) | def _extract_nested_history_transcript(
function _parse_summary_line (line 226) | def _parse_summary_line(line: str) -> TResponseInputItem | None:
function _split_role_and_name (line 249) | def _split_role_and_name(role_text: str) -> tuple[str, str | None]:
function _should_forward_pre_item (line 259) | def _should_forward_pre_item(input_item: TResponseInputItem) -> bool:
function _should_forward_new_item (line 268) | def _should_forward_new_item(input_item: TResponseInputItem) -> bool:
FILE: src/agents/items.py
class RunItemBase (line 90) | class RunItemBase(Generic[T], abc.ABC):
method __post_init__ (line 106) | def __post_init__(self) -> None:
method __getattribute__ (line 110) | def __getattribute__(self, name: str) -> Any:
method release_agent (line 115) | def release_agent(self) -> None:
method _get_agent_via_weakref (line 126) | def _get_agent_via_weakref(self, attr_name: str, ref_name: str) -> Any:
method to_input_item (line 143) | def to_input_item(self) -> TResponseInputItem:
class MessageOutputItem (line 156) | class MessageOutputItem(RunItemBase[ResponseOutputMessage]):
class ToolSearchCallItem (line 166) | class ToolSearchCallItem(RunItemBase[ToolSearchCallRawItem]):
method to_input_item (line 174) | def to_input_item(self) -> TResponseInputItem:
class ToolSearchOutputItem (line 180) | class ToolSearchOutputItem(RunItemBase[ToolSearchOutputRawItem]):
method to_input_item (line 188) | def to_input_item(self) -> TResponseInputItem:
function _tool_search_item_to_input_item (line 193) | def _tool_search_item_to_input_item(
function _output_item_to_input_item (line 208) | def _output_item_to_input_item(raw_item: Any) -> TResponseInputItem:
function _copy_tool_search_mapping (line 224) | def _copy_tool_search_mapping(raw_item: Mapping[str, Any]) -> dict[str, ...
function coerce_tool_search_call_raw_item (line 232) | def coerce_tool_search_call_raw_item(raw_item: Any) -> ToolSearchCallRaw...
function coerce_tool_search_output_raw_item (line 247) | def coerce_tool_search_output_raw_item(raw_item: Any) -> ToolSearchOutpu...
class HandoffCallItem (line 265) | class HandoffCallItem(RunItemBase[ResponseFunctionToolCall]):
class HandoffOutputItem (line 275) | class HandoffOutputItem(RunItemBase[TResponseInputItem]):
method __post_init__ (line 300) | def __post_init__(self) -> None:
method __getattribute__ (line 306) | def __getattribute__(self, name: str) -> Any:
method release_agent (line 316) | def release_agent(self) -> None:
class ToolCallItem (line 347) | class ToolCallItem(RunItemBase[Any]):
class ToolCallOutputItem (line 372) | class ToolCallOutputItem(RunItemBase[Any]):
method to_input_item (line 385) | def to_input_item(self) -> TResponseInputItem:
class ReasoningItem (line 416) | class ReasoningItem(RunItemBase[ResponseReasoningItem]):
class MCPListToolsItem (line 426) | class MCPListToolsItem(RunItemBase[McpListTools]):
class MCPApprovalRequestItem (line 436) | class MCPApprovalRequestItem(RunItemBase[McpApprovalRequest]):
class MCPApprovalResponseItem (line 446) | class MCPApprovalResponseItem(RunItemBase[McpApprovalResponse]):
class CompactionItem (line 456) | class CompactionItem(RunItemBase[TResponseInputItem]):
method to_input_item (line 461) | def to_input_item(self) -> TResponseInputItem:
class ToolApprovalItem (line 477) | class ToolApprovalItem(RunItemBase[Any]):
method __post_init__ (line 503) | def __post_init__(self) -> None:
method __hash__ (line 534) | def __hash__(self) -> int:
method __eq__ (line 538) | def __eq__(self, other: object) -> bool:
method name (line 543) | def name(self) -> str | None:
method qualified_name (line 556) | def qualified_name(self) -> str | None:
method arguments (line 563) | def arguments(self) -> str | None:
method _extract_call_id (line 585) | def _extract_call_id(self) -> str | None:
method call_id (line 592) | def call_id(self) -> str | None:
method to_input_item (line 596) | def to_input_item(self) -> TResponseInputItem:
class ModelResponse (line 624) | class ModelResponse:
method to_input_items (line 641) | def to_input_items(self) -> list[TResponseInputItem]:
class ItemHelpers (line 649) | class ItemHelpers:
method extract_last_content (line 651) | def extract_last_content(cls, message: TResponseOutputItem) -> str:
method extract_last_text (line 667) | def extract_last_text(cls, message: TResponseOutputItem) -> str | None:
method extract_text (line 679) | def extract_text(cls, message: TResponseOutputItem) -> str | None:
method input_to_new_input_list (line 692) | def input_to_new_input_list(
method text_message_outputs (line 706) | def text_message_outputs(cls, items: list[RunItem]) -> str:
method text_message_output (line 715) | def text_message_output(cls, message: MessageOutputItem) -> str:
method tool_call_output_item (line 724) | def tool_call_output_item(
method _convert_tool_output (line 743) | def _convert_tool_output(cls, output: Any) -> str | ResponseFunctionCa...
method _maybe_get_output_as_structured_function_output (line 768) | def _maybe_get_output_as_structured_function_output(
method _convert_single_tool_output_pydantic_model (line 786) | def _convert_single_tool_output_pydantic_model(
FILE: src/agents/lifecycle.py
class RunHooksBase (line 13) | class RunHooksBase(Generic[TContext, TAgent]):
method on_llm_start (line 18) | async def on_llm_start(
method on_llm_end (line 28) | async def on_llm_end(
method on_agent_start (line 37) | async def on_agent_start(self, context: AgentHookContext[TContext], ag...
method on_agent_end (line 46) | async def on_agent_end(
method on_handoff (line 61) | async def on_handoff(
method on_tool_start (line 70) | async def on_tool_start(
method on_tool_end (line 79) | async def on_tool_end(
class AgentHooksBase (line 90) | class AgentHooksBase(Generic[TContext, TAgent]):
method on_start (line 97) | async def on_start(self, context: AgentHookContext[TContext], agent: T...
method on_end (line 107) | async def on_end(
method on_handoff (line 122) | async def on_handoff(
method on_tool_start (line 132) | async def on_tool_start(
method on_tool_end (line 141) | async def on_tool_end(
method on_llm_start (line 151) | async def on_llm_start(
method on_llm_end (line 161) | async def on_llm_end(
FILE: src/agents/mcp/manager.py
class _ServerCommand (line 14) | class _ServerCommand:
class _ServerWorker (line 20) | class _ServerWorker:
method __init__ (line 21) | def __init__(
method is_done (line 34) | def is_done(self) -> bool:
method connect (line 37) | async def connect(self) -> None:
method cleanup (line 40) | async def cleanup(self) -> None:
method _submit (line 43) | async def _submit(self, action: str, timeout_seconds: float | None) ->...
method _run (line 51) | async def _run(self) -> None:
function _run_with_timeout_in_task (line 71) | async def _run_with_timeout_in_task(
class MCPServerManager (line 108) | class MCPServerManager(AbstractAsyncContextManager["MCPServerManager"]):
method __init__ (line 146) | def __init__(
method active_servers (line 173) | def active_servers(self) -> list[MCPServer]:
method all_servers (line 178) | def all_servers(self) -> list[MCPServer]:
method __aenter__ (line 182) | async def __aenter__(self) -> MCPServerManager:
method __aexit__ (line 186) | async def __aexit__(self, exc_type, exc_val, exc_tb) -> bool | None:
method connect_all (line 190) | async def connect_all(self) -> list[MCPServer]:
method reconnect (line 228) | async def reconnect(self, *, failed_only: bool = True) -> list[MCPServ...
method cleanup_all (line 255) | async def cleanup_all(self) -> None:
method _run_with_timeout (line 269) | async def _run_with_timeout(
method _attempt_connect (line 274) | async def _attempt_connect(
method _refresh_active_servers (line 297) | def _refresh_active_servers(self) -> None:
method _record_failure (line 304) | def _record_failure(self, server: MCPServer, exc: BaseException, phase...
method _run_connect (line 311) | async def _run_connect(self, server: MCPServer) -> None:
method _cleanup_server (line 318) | async def _cleanup_server(self, server: MCPServer) -> None:
method _cleanup_servers (line 336) | async def _cleanup_servers(self, servers: Iterable[MCPServer]) -> None:
method _connect_all_parallel (line 349) | async def _connect_all_parallel(self, servers: list[MCPServer]) -> None:
method _get_worker (line 379) | def _get_worker(self, server: MCPServer) -> _ServerWorker:
method _remove_failed_server (line 390) | def _remove_failed_server(self, server: MCPServer) -> None:
method _servers_to_connect (line 397) | def _servers_to_connect(self, servers: Iterable[MCPServer]) -> list[MC...
method _unique_servers (line 404) | def _unique_servers(servers: Iterable[MCPServer]) -> list[MCPServer]:
FILE: src/agents/mcp/server.py
class RequireApprovalToolList (line 42) | class RequireApprovalToolList(TypedDict, total=False):
class RequireApprovalObject (line 46) | class RequireApprovalObject(TypedDict, total=False):
class _SharedSessionRequestNeedsIsolation (line 66) | class _SharedSessionRequestNeedsIsolation(Exception):
class _IsolatedSessionRetryFailed (line 70) | class _IsolatedSessionRetryFailed(Exception):
class _UnsetType (line 74) | class _UnsetType:
class MCPServer (line 97) | class MCPServer(abc.ABC):
method __init__ (line 100) | def __init__(
method connect (line 132) | async def connect(self):
method name (line 141) | def name(self) -> str:
method cleanup (line 146) | async def cleanup(self):
method list_tools (line 153) | async def list_tools(
method call_tool (line 162) | async def call_tool(
method cached_tools (line 172) | def cached_tools(self) -> list[MCPTool] | None:
method list_prompts (line 182) | async def list_prompts(
method get_prompt (line 189) | async def get_prompt(
method _normalize_needs_approval (line 196) | def _normalize_needs_approval(
method _get_needs_approval_for_tool (line 251) | def _get_needs_approval_for_tool(
method _get_failure_error_function (line 279) | def _get_failure_error_function(
class _MCPServerWithClientSession (line 288) | class _MCPServerWithClientSession(MCPServer, abc.ABC):
method cached_tools (line 292) | def cached_tools(self) -> list[MCPTool] | None:
method __init__ (line 295) | def __init__(
method _maybe_serialize_request (line 366) | async def _maybe_serialize_request(self, func: Callable[[], Awaitable[...
method _apply_tool_filter (line 372) | async def _apply_tool_filter(
method _apply_static_tool_filter (line 392) | def _apply_static_tool_filter(
method _apply_dynamic_tool_filter (line 410) | async def _apply_dynamic_tool_filter(
method create_streams (line 453) | def create_streams(
method __aenter__ (line 459) | async def __aenter__(self):
method __aexit__ (line 463) | async def __aexit__(self, exc_type, exc_value, traceback):
method invalidate_tools_cache (line 466) | def invalidate_tools_cache(self):
method _extract_http_error_from_exception (line 470) | def _extract_http_error_from_exception(self, e: BaseException) -> Exce...
method _raise_user_error_for_http_error (line 485) | def _raise_user_error_for_http_error(self, http_error: Exception) -> N...
method _run_with_retries (line 499) | async def _run_with_retries(self, func: Callable[[], Awaitable[T]]) -> T:
method connect (line 511) | async def connect(self):
method list_tools (line 580) | async def list_tools(
method call_tool (line 620) | async def call_tool(
method _validate_required_parameters (line 657) | def _validate_required_parameters(
method list_prompts (line 691) | async def list_prompts(
method get_prompt (line 701) | async def get_prompt(
method cleanup (line 711) | async def cleanup(self):
class MCPServerStdioParams (line 789) | class MCPServerStdioParams(TypedDict):
class MCPServerStdio (line 818) | class MCPServerStdio(_MCPServerWithClientSession):
method __init__ (line 824) | def __init__(
method create_streams (line 900) | def create_streams(
method name (line 907) | def name(self) -> str:
class MCPServerSseParams (line 912) | class MCPServerSseParams(TypedDict):
class MCPServerSse (line 939) | class MCPServerSse(_MCPServerWithClientSession):
method __init__ (line 945) | def __init__(
method create_streams (line 1015) | def create_streams(
method name (line 1032) | def name(self) -> str:
class MCPServerStreamableHttpParams (line 1037) | class MCPServerStreamableHttpParams(TypedDict):
class MCPServerStreamableHttp (line 1066) | class MCPServerStreamableHttp(_MCPServerWithClientSession):
method __init__ (line 1072) | def __init__(
method create_streams (line 1144) | def create_streams(
method _isolated_client_session (line 1162) | async def _isolated_client_session(self):
method _call_tool_with_session (line 1179) | async def _call_tool_with_session(
method _should_retry_in_isolated_session (line 1190) | def _should_retry_in_isolated_session(self, exc: BaseException) -> bool:
method _call_tool_with_shared_session (line 1211) | async def _call_tool_with_shared_session(
method _call_tool_with_isolated_retry (line 1230) | async def _call_tool_with_isolated_retry(
method call_tool (line 1277) | async def call_tool(
method name (line 1352) | def name(self) -> str:
method session_id (line 1357) | def session_id(self) -> str | None:
FILE: src/agents/mcp/util.py
class HttpClientFactory (line 53) | class HttpClientFactory(Protocol):
method __call__ (line 60) | def __call__(
class ToolFilterContext (line 69) | class ToolFilterContext:
class ToolFilterStatic (line 97) | class ToolFilterStatic(TypedDict):
class MCPToolMetaContext (line 117) | class MCPToolMetaContext:
function create_static_tool_filter (line 150) | def create_static_tool_filter(
class MCPUtil (line 177) | class MCPUtil:
method get_all_function_tools (line 181) | async def get_all_function_tools(
method get_function_tools (line 212) | async def get_function_tools(
method to_function_tool (line 238) | def to_function_tool(
method _merge_mcp_meta (line 292) | def _merge_mcp_meta(
method _resolve_meta (line 306) | async def _resolve_meta(
method invoke_mcp_tool (line 334) | async def invoke_mcp_tool(
FILE: src/agents/memory/openai_conversations_session.py
function start_openai_conversations_session (line 12) | async def start_openai_conversations_session(openai_client: AsyncOpenAI ...
class OpenAIConversationsSession (line 23) | class OpenAIConversationsSession(SessionABC):
method __init__ (line 26) | def __init__(
method session_id (line 42) | def session_id(self) -> str:
method session_id (line 61) | def session_id(self, value: str) -> None:
method _get_session_id (line 65) | async def _get_session_id(self) -> str:
method _clear_session_id (line 70) | async def _clear_session_id(self) -> None:
method get_items (line 73) | async def get_items(self, limit: int | None = None) -> list[TResponseI...
method add_items (line 100) | async def add_items(self, items: list[TResponseInputItem]) -> None:
method pop_item (line 110) | async def pop_item(self) -> TResponseInputItem | None:
method clear_session (line 121) | async def clear_session(self) -> None:
FILE: src/agents/memory/openai_responses_compaction_session.py
function select_compaction_candidate_items (line 27) | def select_compaction_candidate_items(
function default_should_trigger_compaction (line 51) | def default_should_trigger_compaction(context: dict[str, Any]) -> bool:
function is_openai_model_name (line 56) | def is_openai_model_name(model: str) -> bool:
class OpenAIResponsesCompactionSession (line 75) | class OpenAIResponsesCompactionSession(SessionABC, OpenAIResponsesCompac...
method __init__ (line 83) | def __init__(
method client (line 135) | def client(self) -> AsyncOpenAI:
method _resolve_compaction_mode_for_response (line 140) | def _resolve_compaction_mode_for_response(
method run_compaction (line 157) | async def run_compaction(self, args: OpenAIResponsesCompactionArgs | N...
method get_items (line 241) | async def get_items(self, limit: int | None = None) -> list[TResponseI...
method _defer_compaction (line 244) | async def _defer_compaction(self, response_id: str, store: bool | None...
method _get_deferred_compaction_response_id (line 264) | def _get_deferred_compaction_response_id(self) -> str | None:
method _clear_deferred_compaction (line 267) | def _clear_deferred_compaction(self) -> None:
method add_items (line 270) | async def add_items(self, items: list[TResponseInputItem]) -> None:
method pop_item (line 279) | async def pop_item(self) -> TResponseInputItem | None:
method clear_session (line 286) | async def clear_session(self) -> None:
method _ensure_compaction_candidates (line 292) | async def _ensure_compaction_candidates(
function _strip_orphaned_assistant_ids (line 310) | def _strip_orphaned_assistant_ids(
function _resolve_compaction_mode (line 342) | def _resolve_compaction_mode(
FILE: src/agents/memory/session.py
class Session (line 14) | class Session(Protocol):
method get_items (line 24) | async def get_items(self, limit: int | None = None) -> list[TResponseI...
method add_items (line 36) | async def add_items(self, items: list[TResponseInputItem]) -> None:
method pop_item (line 44) | async def pop_item(self) -> TResponseInputItem | None:
method clear_session (line 52) | async def clear_session(self) -> None:
class SessionABC (line 57) | class SessionABC(ABC):
method get_items (line 71) | async def get_items(self, limit: int | None = None) -> list[TResponseI...
method add_items (line 84) | async def add_items(self, items: list[TResponseInputItem]) -> None:
method pop_item (line 93) | async def pop_item(self) -> TResponseInputItem | None:
method clear_session (line 102) | async def clear_session(self) -> None:
class OpenAIResponsesCompactionArgs (line 107) | class OpenAIResponsesCompactionArgs(TypedDict, total=False):
class OpenAIResponsesCompactionAwareSession (line 132) | class OpenAIResponsesCompactionAwareSession(Session, Protocol):
method run_compaction (line 135) | async def run_compaction(self, args: OpenAIResponsesCompactionArgs | N...
function is_openai_responses_compaction_aware_session (line 140) | def is_openai_responses_compaction_aware_session(
FILE: src/agents/memory/session_settings.py
function resolve_session_limit (line 12) | def resolve_session_limit(
class SessionSettings (line 25) | class SessionSettings:
method resolve (line 35) | def resolve(self, override: SessionSettings | None) -> SessionSettings:
method to_dict (line 49) | def to_dict(self) -> dict[str, Any]:
FILE: src/agents/memory/sqlite_session.py
class SQLiteSession (line 14) | class SQLiteSession(SessionABC):
method __init__ (line 24) | def __init__(
method _get_connection (line 65) | def _get_connection(self) -> sqlite3.Connection:
method _init_db_for_connection (line 83) | def _init_db_for_connection(self, conn: sqlite3.Connection) -> None:
method get_items (line 117) | async def get_items(self, limit: int | None = None) -> list[TResponseI...
method add_items (line 173) | async def add_items(self, items: list[TResponseInputItem]) -> None:
method pop_item (line 217) | async def pop_item(self) -> TResponseInputItem | None:
method clear_session (line 258) | async def clear_session(self) -> None:
method close (line 276) | def close(self) -> None:
FILE: src/agents/model_settings.py
class _OmitTypeAnnotation (line 24) | class _OmitTypeAnnotation:
method __get_pydantic_core_schema__ (line 26) | def __get_pydantic_core_schema__(
class MCPToolChoice (line 54) | class MCPToolChoice:
class ModelSettings (line 65) | class ModelSettings:
method resolve (line 166) | def resolve(self, override: ModelSettings | None) -> ModelSettings:
method to_json_dict (line 192) | def to_json_dict(self) -> dict[str, Any]:
function _merge_retry_settings (line 196) | def _merge_retry_settings(
function _merge_backoff_settings (line 214) | def _merge_backoff_settings(
FILE: src/agents/models/_openai_retry.py
function _iter_error_chain (line 14) | def _iter_error_chain(error: Exception) -> Iterator[Exception]:
function _header_lookup (line 24) | def _header_lookup(headers: Any, key: str) -> str | None:
function _get_header_value (line 36) | def _get_header_value(error: Exception, key: str) -> str | None:
function _parse_retry_after_ms (line 52) | def _parse_retry_after_ms(value: str | None) -> float | None:
function _parse_retry_after (line 62) | def _parse_retry_after(value: str | None) -> float | None:
function _get_status_code (line 81) | def _get_status_code(error: Exception) -> int | None:
function _get_request_id (line 94) | def _get_request_id(error: Exception) -> str | None:
function _get_error_code (line 102) | def _get_error_code(error: Exception) -> str | None:
function _is_stateful_request (line 121) | def _is_stateful_request(request: ModelRetryAdviceRequest) -> bool:
function _build_normalized_error (line 125) | def _build_normalized_error(
function get_openai_retry_advice (line 146) | def get_openai_retry_advice(request: ModelRetryAdviceRequest) -> ModelRe...
FILE: src/agents/models/_openai_shared.py
function set_default_openai_key (line 18) | def set_default_openai_key(key: str) -> None:
function get_default_openai_key (line 23) | def get_default_openai_key() -> str | None:
function set_default_openai_client (line 27) | def set_default_openai_client(client: AsyncOpenAI) -> None:
function get_default_openai_client (line 32) | def get_default_openai_client() -> AsyncOpenAI | None:
function set_use_responses_by_default (line 36) | def set_use_responses_by_default(use_responses: bool) -> None:
function get_use_responses_by_default (line 41) | def get_use_responses_by_default() -> bool:
function set_use_responses_websocket_by_default (line 45) | def set_use_responses_websocket_by_default(use_responses_websocket: bool...
function get_use_responses_websocket_by_default (line 49) | def get_use_responses_websocket_by_default() -> bool:
function set_default_openai_responses_transport (line 53) | def set_default_openai_responses_transport(transport: OpenAIResponsesTra...
function get_default_openai_responses_transport (line 60) | def get_default_openai_responses_transport() -> OpenAIResponsesTransport:
FILE: src/agents/models/_retry_runtime.py
function provider_managed_retries_disabled (line 18) | def provider_managed_retries_disabled(disabled: bool) -> Iterator[None]:
function should_disable_provider_managed_retries (line 26) | def should_disable_provider_managed_retries() -> bool:
function websocket_pre_event_retries_disabled (line 31) | def websocket_pre_event_retries_disabled(disabled: bool) -> Iterator[None]:
function should_disable_websocket_pre_event_retries (line 39) | def should_disable_websocket_pre_event_retries() -> bool:
FILE: src/agents/models/chatcmpl_converter.py
class Converter (line 72) | class Converter:
method convert_tool_choice (line 74) | def convert_tool_choice(
method convert_response_format (line 100) | def convert_response_format(
method message_to_output_items (line 116) | def message_to_output_items(
method maybe_easy_input_message (line 237) | def maybe_easy_input_message(cls, item: Any) -> EasyInputMessageParam ...
method maybe_input_message (line 256) | def maybe_input_message(cls, item: Any) -> Message | None:
method maybe_file_search_call (line 272) | def maybe_file_search_call(cls, item: Any) -> ResponseFileSearchToolCa...
method maybe_function_tool_call (line 278) | def maybe_function_tool_call(cls, item: Any) -> ResponseFunctionToolCa...
method maybe_function_tool_call_output (line 284) | def maybe_function_tool_call_output(
method maybe_item_reference (line 293) | def maybe_item_reference(cls, item: Any) -> ItemReference | None:
method maybe_response_output_message (line 299) | def maybe_response_output_message(cls, item: Any) -> ResponseOutputMes...
method maybe_reasoning_message (line 310) | def maybe_reasoning_message(cls, item: Any) -> ResponseReasoningItemPa...
method extract_text_content (line 316) | def extract_text_content(
method extract_all_content (line 333) | def extract_all_content(
method items_to_messages (line 425) | def items_to_messages(
method tool_to_openai (line 808) | def tool_to_openai(cls, tool: Tool) -> ChatCompletionToolParam:
method convert_handoff_tool (line 830) | def convert_handoff_tool(cls, handoff: Handoff[Any, Any]) -> ChatCompl...
FILE: src/agents/models/chatcmpl_helpers.py
class ChatCmplHelpers (line 24) | class ChatCmplHelpers:
method is_openai (line 26) | def is_openai(cls, client: AsyncOpenAI):
method get_store_param (line 30) | def get_store_param(cls, client: AsyncOpenAI, model_settings: ModelSet...
method get_stream_options_param (line 36) | def get_stream_options_param(
method convert_logprobs_for_output_text (line 52) | def convert_logprobs_for_output_text(
method convert_logprobs_for_text_delta (line 78) | def convert_logprobs_for_text_delta(
method clean_gemini_tool_call_id (line 103) | def clean_gemini_tool_call_id(cls, tool_call_id: str, model: str | Non...
FILE: src/agents/models/chatcmpl_stream_handler.py
class Part (line 51) | class Part:
method __init__ (line 52) | def __init__(self, text: str, type: str):
class StreamingState (line 58) | class StreamingState:
class SequenceNumber (line 76) | class SequenceNumber:
method __init__ (line 77) | def __init__(self):
method get_and_increment (line 80) | def get_and_increment(self) -> int:
class ChatCmplStreamHandler (line 86) | class ChatCmplStreamHandler:
method _finish_reasoning_summary_part (line 88) | def _finish_reasoning_summary_part(
method _finish_reasoning_item (line 119) | def _finish_reasoning_item(
method handle_stream (line 149) | async def handle_stream(
FILE: src/agents/models/default_models.py
function _is_gpt_5_none_effort_model (line 28) | def _is_gpt_5_none_effort_model(model_name: str) -> bool:
function gpt_5_reasoning_settings_required (line 32) | def gpt_5_reasoning_settings_required(model_name: str) -> bool:
function is_gpt_5_default (line 43) | def is_gpt_5_default() -> bool:
function get_default_model (line 52) | def get_default_model() -> str:
function get_default_model_settings (line 59) | def get_default_model_settings(model: Optional[str] = None) -> ModelSett...
FILE: src/agents/models/interface.py
class ModelTracing (line 20) | class ModelTracing(enum.Enum):
method is_disabled (line 30) | def is_disabled(self) -> bool:
method include_data (line 33) | def include_data(self) -> bool:
class Model (line 37) | class Model(abc.ABC):
method close (line 40) | async def close(self) -> None:
method get_retry_advice (line 48) | def get_retry_advice(self, request: ModelRetryAdviceRequest) -> ModelR...
method get_response (line 57) | async def get_response(
method stream_response (line 92) | def stream_response(
class ModelProvider (line 127) | class ModelProvider(abc.ABC):
method get_model (line 134) | def get_model(self, model_name: str | None) -> Model:
method aclose (line 144) | async def aclose(self) -> None:
FILE: src/agents/models/multi_provider.py
class MultiProviderMap (line 15) | class MultiProviderMap:
method __init__ (line 18) | def __init__(self):
method has_prefix (line 21) | def has_prefix(self, prefix: str) -> bool:
method get_mapping (line 25) | def get_mapping(self) -> dict[str, ModelProvider]:
method set_mapping (line 29) | def set_mapping(self, mapping: dict[str, ModelProvider]):
method get_provider (line 33) | def get_provider(self, prefix: str) -> ModelProvider | None:
method add_provider (line 41) | def add_provider(self, prefix: str, provider: ModelProvider):
method remove_provider (line 50) | def remove_provider(self, prefix: str):
class MultiProvider (line 59) | class MultiProvider(ModelProvider):
method __init__ (line 72) | def __init__(
method _get_prefix_and_model_name (line 132) | def _get_prefix_and_model_name(self, model_name: str | None) -> tuple[...
method _create_fallback_provider (line 141) | def _create_fallback_provider(self, prefix: str) -> ModelProvider:
method _validate_openai_prefix_mode (line 150) | def _validate_openai_prefix_mode(mode: str) -> MultiProviderOpenAIPref...
method _validate_unknown_prefix_mode (line 156) | def _validate_unknown_prefix_mode(mode: str) -> MultiProviderUnknownPr...
method _get_fallback_provider (line 163) | def _get_fallback_provider(self, prefix: str | None) -> ModelProvider:
method _resolve_prefixed_model (line 172) | def _resolve_prefixed_model(
method get_model (line 197) | def get_model(self, model_name: str | None) -> Model:
method aclose (line 224) | async def aclose(self) -> None:
FILE: src/agents/models/openai_chatcompletions.py
class OpenAIChatCompletionsModel (line 48) | class OpenAIChatCompletionsModel(Model):
method __init__ (line 53) | def __init__(
method _non_null_or_omit (line 63) | def _non_null_or_omit(self, value: Any) -> Any:
method get_retry_advice (line 66) | def get_retry_advice(self, request: ModelRetryAdviceRequest) -> ModelR...
method _validate_official_openai_input_content_types (line 69) | def _validate_official_openai_input_content_types(
method get_response (line 99) | async def get_response(
method _attach_logprobs_to_output (line 200) | def _attach_logprobs_to_output(
method stream_response (line 212) | async def stream_response(
method _fetch_response (line 277) | async def _fetch_response(
method _fetch_response (line 292) | async def _fetch_response(
method _fetch_response (line 306) | async def _fetch_response(
method _get_client (line 444) | def _get_client(self) -> AsyncOpenAI:
method _merge_headers (line 453) | def _merge_headers(self, model_settings: ModelSettings):
FILE: src/agents/models/openai_provider.py
function shared_http_client (line 27) | def shared_http_client() -> httpx.AsyncClient:
class OpenAIProvider (line 34) | class OpenAIProvider(ModelProvider):
method __init__ (line 35) | def __init__(
method _get_client (line 100) | def _get_client(self) -> AsyncOpenAI:
method _get_running_loop (line 115) | def _get_running_loop(self) -> asyncio.AbstractEventLoop | None:
method _close_ws_models_for_loop (line 121) | async def _close_ws_models_for_loop(
method _close_models (line 141) | async def _close_models(self, models: list[Model]) -> None:
method _clear_ws_loop_cache_entry (line 145) | def _clear_ws_loop_cache_entry(
method _collect_unique_cached_models (line 154) | def _collect_unique_cached_models(
method _prune_closed_ws_loop_caches (line 166) | def _prune_closed_ws_loop_caches(self) -> None:
method get_model (line 178) | def get_model(self, model_name: str | None) -> Model:
method aclose (line 218) | async def aclose(self) -> None:
FILE: src/agents/models/openai_responses.py
class _NamespaceToolParam (line 91) | class _NamespaceToolParam(TypedDict):
function _json_dumps_default (line 98) | def _json_dumps_default(value: Any) -> Any:
function _is_openai_omitted_value (line 115) | def _is_openai_omitted_value(value: Any) -> bool:
function _require_responses_tool_param (line 119) | def _require_responses_tool_param(value: object) -> ResponsesToolParam:
function _is_response_includable (line 130) | def _is_response_includable(value: object) -> TypeGuard[ResponseIncludab...
function _coerce_response_includables (line 134) | def _coerce_response_includables(values: Sequence[str]) -> list[Response...
function _materialize_responses_tool_params (line 145) | def _materialize_responses_tool_params(
function _refresh_openai_client_api_key_if_supported (line 158) | async def _refresh_openai_client_api_key_if_supported(client: Any) -> None:
function _construct_response_stream_event_from_payload (line 165) | def _construct_response_stream_event_from_payload(
class _WebsocketRequestTimeouts (line 184) | class _WebsocketRequestTimeouts:
class _ResponseStreamWithRequestId (line 191) | class _ResponseStreamWithRequestId:
method __init__ (line 201) | def __init__(
method __aiter__ (line 216) | def __aiter__(self) -> _ResponseStreamWithRequestId:
method __anext__ (line 219) | async def __anext__(self) -> ResponseStreamEvent:
method aclose (line 236) | async def aclose(self) -> None:
method close (line 243) | async def close(self) -> None:
method _attach_request_id (line 246) | def _attach_request_id(self, event: ResponseStreamEvent) -> None:
method _cleanup_once (line 259) | async def _cleanup_once(self) -> None:
method _cleanup_after_exhaustion (line 265) | async def _cleanup_after_exhaustion(self) -> None:
method _close_stream_once (line 274) | async def _close_stream_once(self) -> None:
class ResponsesWebSocketError (line 291) | class ResponsesWebSocketError(RuntimeError):
method __init__ (line 294) | def __init__(self, payload: Mapping[str, Any]):
method _coerce_optional_str (line 316) | def _coerce_optional_str(value: Any) -> str | None:
function _iter_retry_error_chain (line 320) | def _iter_retry_error_chain(error: Exception):
function _get_wrapped_websocket_replay_safety (line 330) | def _get_wrapped_websocket_replay_safety(error: Exception) -> str | None:
function _did_start_websocket_response (line 335) | def _did_start_websocket_response(error: Exception) -> bool:
function _is_never_sent_websocket_error (line 339) | def _is_never_sent_websocket_error(error: Exception) -> bool:
function _is_ambiguous_websocket_replay_error (line 349) | def _is_ambiguous_websocket_replay_error(error: Exception) -> bool:
function _get_websocket_timeout_phase (line 359) | def _get_websocket_timeout_phase(error: Exception) -> str | None:
function _should_retry_pre_event_websocket_disconnect (line 370) | def _should_retry_pre_event_websocket_disconnect() -> bool:
class OpenAIResponsesModel (line 374) | class OpenAIResponsesModel(Model):
method __init__ (line 379) | def __init__(
method _non_null_or_omit (line 390) | def _non_null_or_omit(self, value: Any) -> Any:
method get_retry_advice (line 393) | def get_retry_advice(self, request: ModelRetryAdviceRequest) -> ModelR...
method _maybe_aclose_async_iterator (line 396) | async def _maybe_aclose_async_iterator(self, iterator: Any) -> None:
method _schedule_async_iterator_close (line 408) | def _schedule_async_iterator_close(self, iterator: Any) -> None:
method _consume_background_cleanup_task_result (line 413) | def _consume_background_cleanup_task_result(task: asyncio.Task[Any]) -...
method get_response (line 421) | async def get_response(
method stream_response (line 499) | async def stream_response(
method _fetch_response (line 586) | async def _fetch_response(
method _fetch_response (line 601) | async def _fetch_response(
method _fetch_response (line 615) | async def _fetch_response(
method _build_response_create_kwargs (line 670) | def _build_response_create_kwargs(
method _remove_openai_responses_api_incompatible_fields (line 816) | def _remove_openai_responses_api_incompatible_fields(self, list_input:...
method _clean_item_for_openai (line 843) | def _clean_item_for_openai(self, item: Any) -> Any | None:
method _get_client (line 862) | def _get_client(self) -> AsyncOpenAI:
method _merge_headers (line 871) | def _merge_headers(self, model_settings: ModelSettings):
class OpenAIResponsesWSModel (line 879) | class OpenAIResponsesWSModel(OpenAIResponsesModel):
method __init__ (line 889) | def __init__(
method get_retry_advice (line 908) | def get_retry_advice(self, request: ModelRetryAdviceRequest) -> ModelR...
method _get_ws_request_lock (line 965) | def _get_ws_request_lock(self) -> asyncio.Lock:
method _fetch_response (line 977) | async def _fetch_response(
method _fetch_response (line 992) | async def _fetch_response(
method _fetch_response (line 1006) | async def _fetch_response(
method _iter_websocket_response_events (line 1059) | async def _iter_websocket_response_events(
method _should_wrap_pre_event_websocket_disconnect (line 1196) | def _should_wrap_pre_event_websocket_disconnect(self, exc: Exception) ...
method _get_websocket_request_timeouts (line 1214) | def _get_websocket_request_timeouts(self, timeout: Any) -> _WebsocketR...
method _await_websocket_with_timeout (line 1237) | async def _await_websocket_with_timeout(
method _prepare_websocket_request (line 1267) | async def _prepare_websocket_request(
method _merge_websocket_headers (line 1310) | def _merge_websocket_headers(self, extra_headers: Mapping[str, Any]) -...
method _prepare_websocket_url (line 1330) | def _prepare_websocket_url(self, extra_query: Any) -> str:
method _ensure_websocket_connection (line 1371) | async def _ensure_websocket_connection(
method _is_websocket_connection_reusable (line 1402) | def _is_websocket_connection_reusable(self, connection: Any) -> bool:
method close (line 1425) | async def close(self) -> None:
method _get_current_loop_ws_request_lock (line 1437) | def _get_current_loop_ws_request_lock(self) -> asyncio.Lock | None:
method _force_abort_websocket_connection (line 1451) | def _force_abort_websocket_connection(self, connection: Any) -> None:
method _force_drop_websocket_connection_sync (line 1467) | def _force_drop_websocket_connection_sync(self) -> None:
method _clear_websocket_connection_state (line 1477) | def _clear_websocket_connection_state(self) -> None:
method _drop_websocket_connection (line 1483) | async def _drop_websocket_connection(self) -> None:
method _open_websocket_connection (line 1495) | async def _open_websocket_connection(
class ConvertedTools (line 1520) | class ConvertedTools:
class Converter (line 1525) | class Converter:
method _convert_shell_environment (line 1527) | def _convert_shell_environment(cls, environment: ShellToolEnvironment ...
method convert_tool_choice (line 1540) | def convert_tool_choice(
method _validate_required_tool_choice (line 1613) | def _validate_required_tool_choice(
method _validate_named_function_tool_choice (line 1633) | def _validate_named_function_tool_choice(
method _has_computer_tool (line 1710) | def _has_computer_tool(cls, tools: Sequence[Tool] | None) -> bool:
method _has_unresolved_computer_tool (line 1714) | def _has_unresolved_computer_tool(cls, tools: Sequence[Tool] | None) -...
method _is_preview_computer_model (line 1722) | def _is_preview_computer_model(cls, model: str | ChatModel | None) -> ...
method _is_ga_computer_model (line 1726) | def _is_ga_computer_model(cls, model: str | ChatModel | None) -> bool:
method resolve_computer_tool_model (line 1730) | def resolve_computer_tool_model(
method _should_use_preview_computer_tool (line 1741) | def _should_use_preview_computer_tool(
method _convert_builtin_computer_tool_choice (line 1761) | def _convert_builtin_computer_tool_choice(
method get_response_format (line 1783) | def get_response_format(
method convert_tools (line 1799) | def convert_tools(
method _convert_function_tool (line 1884) | def _convert_function_tool(
method _convert_preview_computer_tool (line 1902) | def _convert_preview_computer_tool(cls, tool: ComputerTool[Any]) -> Re...
method _convert_tool (line 1927) | def _convert_tool(
method _convert_handoff_tool (line 2005) | def _convert_handoff_tool(cls, handoff: Handoff) -> ResponsesToolParam:
FILE: src/agents/models/reasoning_content_replay.py
class ReasoningContentSource (line 9) | class ReasoningContentSource:
class ReasoningContentReplayContext (line 23) | class ReasoningContentReplayContext:
function default_should_replay_reasoning_content (line 39) | def default_should_replay_reasoning_content(context: ReasoningContentRep...
FILE: src/agents/prompts.py
class Prompt (line 22) | class Prompt(TypedDict):
class GenerateDynamicPromptData (line 36) | class GenerateDynamicPromptData:
function _coerce_prompt_dict (line 50) | def _coerce_prompt_dict(prompt: Prompt | dict[object, object]) -> Prompt:
class PromptUtil (line 55) | class PromptUtil:
method to_model_input (line 57) | async def to_model_input(
FILE: src/agents/realtime/_default_tracker.py
class ModelAudioState (line 11) | class ModelAudioState:
class ModelAudioTracker (line 16) | class ModelAudioTracker:
method __init__ (line 17) | def __init__(self) -> None:
method set_audio_format (line 22) | def set_audio_format(self, format: RealtimeAudioFormat) -> None:
method on_audio_delta (line 26) | def on_audio_delta(self, item_id: str, item_content_index: int, audio_...
method on_interrupted (line 37) | def on_interrupted(self) -> None:
method get_state (line 41) | def get_state(self, item_id: str, item_content_index: int) -> ModelAud...
method get_last_audio_item (line 45) | def get_last_audio_item(self) -> tuple[str, int] | None:
FILE: src/agents/realtime/_util.py
function calculate_audio_length_ms (line 10) | def calculate_audio_length_ms(format: RealtimeAudioFormat | None, audio_...
FILE: src/agents/realtime/agent.py
class RealtimeAgent (line 27) | class RealtimeAgent(AgentBase, Generic[TContext]):
method clone (line 82) | def clone(self, **kwargs: Any) -> RealtimeAgent[TContext]:
method get_system_prompt (line 90) | async def get_system_prompt(self, run_context: RunContextWrapper[TCont...
FILE: src/agents/realtime/audio_formats.py
function to_realtime_audio_format (line 16) | def to_realtime_audio_format(
FILE: src/agents/realtime/config.py
class RealtimeClientMessage (line 48) | class RealtimeClientMessage(TypedDict):
class RealtimeInputAudioTranscriptionConfig (line 58) | class RealtimeInputAudioTranscriptionConfig(TypedDict):
class RealtimeInputAudioNoiseReductionConfig (line 71) | class RealtimeInputAudioNoiseReductionConfig(TypedDict):
class RealtimeTurnDetectionConfig (line 78) | class RealtimeTurnDetectionConfig(TypedDict):
class RealtimeAudioInputConfig (line 109) | class RealtimeAudioInputConfig(TypedDict, total=False):
class RealtimeAudioOutputConfig (line 118) | class RealtimeAudioOutputConfig(TypedDict, total=False):
class RealtimeAudioConfig (line 126) | class RealtimeAudioConfig(TypedDict, total=False):
class RealtimeSessionModelSettings (line 133) | class RealtimeSessionModelSettings(TypedDict):
class RealtimeGuardrailsSettings (line 188) | class RealtimeGuardrailsSettings(TypedDict):
class RealtimeModelTracingConfig (line 199) | class RealtimeModelTracingConfig(TypedDict):
class RealtimeRunConfig (line 212) | class RealtimeRunConfig(TypedDict):
class RealtimeUserInputText (line 236) | class RealtimeUserInputText(TypedDict):
class RealtimeUserInputImage (line 246) | class RealtimeUserInputImage(TypedDict, total=False):
class RealtimeUserInputMessage (line 254) | class RealtimeUserInputMessage(TypedDict):
FILE: src/agents/realtime/events.py
class RealtimeEventInfo (line 17) | class RealtimeEventInfo:
class RealtimeAgentStartEvent (line 23) | class RealtimeAgentStartEvent:
class RealtimeAgentEndEvent (line 36) | class RealtimeAgentEndEvent:
class RealtimeHandoffEvent (line 49) | class RealtimeHandoffEvent:
class RealtimeToolStart (line 65) | class RealtimeToolStart:
class RealtimeToolEnd (line 84) | class RealtimeToolEnd:
class RealtimeToolApprovalRequired (line 106) | class RealtimeToolApprovalRequired:
class RealtimeRawModelEvent (line 128) | class RealtimeRawModelEvent:
class RealtimeAudioEnd (line 141) | class RealtimeAudioEnd:
class RealtimeAudio (line 157) | class RealtimeAudio:
class RealtimeAudioInterrupted (line 176) | class RealtimeAudioInterrupted:
class RealtimeError (line 194) | class RealtimeError:
class RealtimeHistoryUpdated (line 207) | class RealtimeHistoryUpdated:
class RealtimeHistoryAdded (line 220) | class RealtimeHistoryAdded:
class RealtimeGuardrailTripped (line 233) | class RealtimeGuardrailTripped:
class RealtimeInputAudioTimeoutTriggered (line 249) | class RealtimeInputAudioTimeoutTriggered:
FILE: src/agents/realtime/handoffs.py
function realtime_handoff (line 30) | def realtime_handoff(
function realtime_handoff (line 41) | def realtime_handoff(
function realtime_handoff (line 54) | def realtime_handoff(
function realtime_handoff (line 65) | def realtime_handoff(
FILE: src/agents/realtime/items.py
class InputText (line 8) | class InputText(BaseModel):
class InputAudio (line 21) | class InputAudio(BaseModel):
class InputImage (line 37) | class InputImage(BaseModel):
class AssistantText (line 53) | class AssistantText(BaseModel):
class AssistantAudio (line 66) | class AssistantAudio(BaseModel):
class SystemMessageItem (line 82) | class SystemMessageItem(BaseModel):
class UserMessageItem (line 104) | class UserMessageItem(BaseModel):
class AssistantMessageItem (line 126) | class AssistantMessageItem(BaseModel):
class RealtimeToolCallItem (line 158) | class RealtimeToolCallItem(BaseModel):
class RealtimeResponse (line 193) | class RealtimeResponse(BaseModel):
FILE: src/agents/realtime/model.py
class RealtimePlaybackState (line 18) | class RealtimePlaybackState(TypedDict):
class RealtimePlaybackTracker (line 29) | class RealtimePlaybackTracker:
method __init__ (line 35) | def __init__(self) -> None:
method on_play_bytes (line 41) | def on_play_bytes(self, item_id: str, item_content_index: int, bytes: ...
method on_play_ms (line 52) | def on_play_ms(self, item_id: str, item_content_index: int, ms: float)...
method on_interrupted (line 67) | def on_interrupted(self) -> None:
method set_audio_format (line 72) | def set_audio_format(self, format: RealtimeAudioFormat) -> None:
method get_state (line 80) | def get_state(self) -> RealtimePlaybackState:
class RealtimeModelListener (line 98) | class RealtimeModelListener(abc.ABC):
method on_event (line 102) | async def on_event(self, event: RealtimeModelEvent) -> None:
class RealtimeModelConfig (line 107) | class RealtimeModelConfig(TypedDict):
class RealtimeModel (line 151) | class RealtimeModel(abc.ABC):
method connect (line 155) | async def connect(self, options: RealtimeModelConfig) -> None:
method add_listener (line 160) | def add_listener(self, listener: RealtimeModelListener) -> None:
method remove_listener (line 165) | def remove_listener(self, listener: RealtimeModelListener) -> None:
method send_event (line 170) | async def send_event(self, event: RealtimeModelSendEvent) -> None:
method close (line 175) | async def close(self) -> None:
FILE: src/agents/realtime/model_events.py
class RealtimeModelErrorEvent (line 14) | class RealtimeModelErrorEvent:
class RealtimeModelToolCallEvent (line 23) | class RealtimeModelToolCallEvent:
class RealtimeModelAudioEvent (line 37) | class RealtimeModelAudioEvent:
class RealtimeModelAudioInterruptedEvent (line 53) | class RealtimeModelAudioInterruptedEvent:
class RealtimeModelAudioDoneEvent (line 66) | class RealtimeModelAudioDoneEvent:
class RealtimeModelInputAudioTranscriptionCompletedEvent (line 79) | class RealtimeModelInputAudioTranscriptionCompletedEvent:
class RealtimeModelInputAudioTimeoutTriggeredEvent (line 89) | class RealtimeModelInputAudioTimeoutTriggeredEvent:
class RealtimeModelTranscriptDeltaEvent (line 100) | class RealtimeModelTranscriptDeltaEvent:
class RealtimeModelItemUpdatedEvent (line 111) | class RealtimeModelItemUpdatedEvent:
class RealtimeModelItemDeletedEvent (line 120) | class RealtimeModelItemDeletedEvent:
class RealtimeModelConnectionStatusEvent (line 129) | class RealtimeModelConnectionStatusEvent:
class RealtimeModelTurnStartedEvent (line 138) | class RealtimeModelTurnStartedEvent:
class RealtimeModelTurnEndedEvent (line 145) | class RealtimeModelTurnEndedEvent:
class RealtimeModelOtherEvent (line 152) | class RealtimeModelOtherEvent:
class RealtimeModelExceptionEvent (line 161) | class RealtimeModelExceptionEvent:
class RealtimeModelRawServerEvent (line 171) | class RealtimeModelRawServerEvent:
FILE: src/agents/realtime/model_inputs.py
class RealtimeModelRawClientMessage (line 12) | class RealtimeModelRawClientMessage(TypedDict):
class RealtimeModelInputTextContent (line 20) | class RealtimeModelInputTextContent(TypedDict):
class RealtimeModelInputImageContent (line 27) | class RealtimeModelInputImageContent(TypedDict, total=False):
class RealtimeModelUserInputMessage (line 41) | class RealtimeModelUserInputMessage(TypedDict):
class RealtimeModelSendRawMessage (line 57) | class RealtimeModelSendRawMessage:
class RealtimeModelSendUserInput (line 65) | class RealtimeModelSendUserInput:
class RealtimeModelSendAudio (line 73) | class RealtimeModelSendAudio:
class RealtimeModelSendToolOutput (line 81) | class RealtimeModelSendToolOutput:
class RealtimeModelSendInterrupt (line 95) | class RealtimeModelSendInterrupt:
class RealtimeModelSendSessionUpdate (line 103) | class RealtimeModelSendSessionUpdate:
FILE: src/agents/realtime/openai_realtime.py
function get_api_key (line 175) | async def get_api_key(key: str | Callable[[], MaybeAwaitable[str]] | Non...
function get_server_event_type_adapter (line 195) | def get_server_event_type_adapter() -> TypeAdapter[AllRealtimeServerEven...
function _collect_enabled_handoffs (line 202) | async def _collect_enabled_handoffs(
function _build_model_settings_from_agent (line 225) | async def _build_model_settings_from_agent(
class TransportConfig (line 256) | class TransportConfig(TypedDict):
class OpenAIRealtimeWebSocketModel (line 271) | class OpenAIRealtimeWebSocketModel(RealtimeModel):
method __init__ (line 274) | def __init__(self, *, transport_config: TransportConfig | None = None)...
method connect (line 289) | async def connect(self, options: RealtimeModelConfig) -> None:
method _create_websocket_connection (line 342) | async def _create_websocket_connection(
method _send_tracing_config (line 374) | async def _send_tracing_config(
method add_listener (line 391) | def add_listener(self, listener: RealtimeModelListener) -> None:
method remove_listener (line 396) | def remove_listener(self, listener: RealtimeModelListener) -> None:
method _emit_event (line 401) | async def _emit_event(self, event: RealtimeModelEvent) -> None:
method _listen_for_messages (line 407) | async def _listen_for_messages(self):
method send_event (line 444) | async def send_event(self, event: RealtimeModelSendEvent) -> None:
method _send_raw_message (line 466) | async def _send_raw_message(self, event: OpenAIRealtimeClientEvent) ->...
method _send_user_input (line 472) | async def _send_user_input(self, event: RealtimeModelSendUserInput) ->...
method _send_audio (line 477) | async def _send_audio(self, event: RealtimeModelSendAudio) -> None:
method _send_tool_output (line 485) | async def _send_tool_output(self, event: RealtimeModelSendToolOutput) ...
method _get_playback_state (line 504) | def _get_playback_state(self) -> RealtimePlaybackState:
method _get_audio_limits (line 527) | def _get_audio_limits(self, item_id: str, item_content_index: int) -> ...
method _send_interrupt (line 534) | async def _send_interrupt(self, event: RealtimeModelSendInterrupt) -> ...
method _send_session_update (line 595) | async def _send_session_update(self, event: RealtimeModelSendSessionUp...
method _handle_audio_delta (line 599) | async def _handle_audio_delta(self, parsed: ResponseAudioDeltaEvent) -...
method _handle_output_item (line 616) | async def _handle_output_item(self, item: ConversationItem) -> None:
method _handle_conversation_item (line 655) | async def _handle_conversation_item(
method close (line 664) | async def close(self) -> None:
method _cancel_response (line 677) | async def _cancel_response(self) -> None:
method _handle_ws_event (line 682) | async def _handle_ws_event(self, event: dict[str, Any]):
method _update_created_session (line 887) | def _update_created_session(
method _normalize_session_payload (line 909) | def _normalize_session_payload(
method _is_transcription_session (line 938) | def _is_transcription_session(payload: Mapping[str, object]) -> bool:
method _extract_audio_format (line 947) | def _extract_audio_format(session: OpenAISessionCreateRequest) -> str ...
method _normalize_audio_format (line 955) | def _normalize_audio_format(fmt: object) -> str:
method _read_format_type (line 970) | def _read_format_type(fmt: object) -> str | None:
method _normalize_turn_detection_config (line 990) | def _normalize_turn_detection_config(config: object) -> object:
method _update_session_config (line 1011) | async def _update_session_config(self, model_settings: RealtimeSession...
method _get_session_config (line 1017) | def _get_session_config(
method _tools_to_session_tools (line 1151) | def _tools_to_session_tools(
class OpenAIRealtimeSIPModel (line 1184) | class OpenAIRealtimeSIPModel(OpenAIRealtimeWebSocketModel):
method build_initial_session_payload (line 1188) | async def build_initial_session_payload(
method connect (line 1223) | async def connect(self, options: RealtimeModelConfig) -> None:
class _ConversionHelper (line 1232) | class _ConversionHelper:
method conversation_item_to_realtime_message_item (line 1234) | def conversation_item_to_realtime_message_item(
method try_convert_raw_message (line 1268) | def try_convert_raw_message(
method convert_tracing_config (line 1280) | def convert_tracing_config(
method convert_user_input_to_conversation_item (line 1294) | def convert_user_input_to_conversation_item(
method convert_user_input_to_item_create (line 1350) | def convert_user_input_to_item_create(
method convert_audio_to_input_audio_buffer_append (line 1359) | def convert_audio_to_input_audio_buffer_append(
method convert_tool_output (line 1369) | def convert_tool_output(cls, event: RealtimeModelSendToolOutput) -> Op...
method convert_interrupt (line 1380) | def convert_interrupt(
FILE: src/agents/realtime/runner.py
class RealtimeRunner (line 18) | class RealtimeRunner:
method __init__ (line 30) | def __init__(
method run (line 49) | async def run(
FILE: src/agents/realtime/session.py
function _serialize_tool_output (line 73) | def _serialize_tool_output(output: Any) -> str:
class RealtimeSession (line 96) | class RealtimeSession(RealtimeModelListener):
method __init__ (line 116) | def __init__(
method model (line 166) | def model(self) -> RealtimeModel:
method __aenter__ (line 170) | async def __aenter__(self) -> RealtimeSession:
method enter (line 196) | async def enter(self) -> RealtimeSession:
method __aexit__ (line 203) | async def __aexit__(self, _exc_type: Any, _exc_val: Any, _exc_tb: Any)...
method __aiter__ (line 207) | async def __aiter__(self) -> AsyncIterator[RealtimeSessionEvent]:
method close (line 222) | async def close(self) -> None:
method send_message (line 226) | async def send_message(self, message: RealtimeUserInput) -> None:
method send_audio (line 230) | async def send_audio(self, audio: bytes, *, commit: bool = False) -> N...
method interrupt (line 234) | async def interrupt(self) -> None:
method update_agent (line 238) | async def update_agent(self, agent: RealtimeAgent) -> None:
method on_event (line 251) | async def on_event(self, event: RealtimeModelEvent) -> None:
method _put_event (line 425) | async def _put_event(self, event: RealtimeSessionEvent) -> None:
method _function_needs_approval (line 429) | async def _function_needs_approval(
method _build_tool_approval_item (line 448) | def _build_tool_approval_item(
method _maybe_request_tool_approval (line 460) | async def _maybe_request_tool_approval(
method _send_tool_rejection (line 499) | async def _send_tool_rejection(
method _resolve_approval_rejection_message (line 529) | async def _resolve_approval_rejection_message(self, *, tool: FunctionT...
method approve_tool_call (line 572) | async def approve_tool_call(self, call_id: str, *, always: bool = Fals...
method reject_tool_call (line 586) | async def reject_tool_call(
method _handle_tool_call (line 606) | async def _handle_tool_call(
method _get_new_history (line 734) | def _get_new_history(
method _run_output_guardrails (line 905) | async def _run_output_guardrails(self, text: str, response_id: str) ->...
method _enqueue_guardrail_task (line 970) | def _enqueue_guardrail_task(self, text: str, response_id: str) -> None:
method _on_guardrail_task_done (line 979) | def _on_guardrail_task_done(self, task: asyncio.Task[Any]) -> None:
method _cleanup_guardrail_tasks (line 998) | def _cleanup_guardrail_tasks(self) -> None:
method _enqueue_tool_call_task (line 1004) | def _enqueue_tool_call_task(
method _on_tool_call_task_done (line 1012) | def _on_tool_call_task_done(self, task: asyncio.Task[Any]) -> None:
method _cleanup_tool_call_tasks (line 1036) | def _cleanup_tool_call_tasks(self) -> None:
method _cleanup (line 1042) | async def _cleanup(self) -> None:
method _get_updated_model_settings_from_agent (line 1060) | async def _get_updated_model_settings_from_agent(
method _get_handoffs (line 1091) | async def _get_handoffs(
FILE: src/agents/repl.py
function run_demo_loop (line 15) | async def run_demo_loop(
FILE: src/agents/responses_websocket_session.py
class ResponsesWebSocketSession (line 23) | class ResponsesWebSocketSession:
method __post_init__ (line 29) | def __post_init__(self) -> None:
method _validate_provider_alignment (line 32) | def _validate_provider_alignment(self) -> MultiProvider:
method aclose (line 44) | async def aclose(self) -> None:
method _prepare_runner_kwargs (line 48) | def _prepare_runner_kwargs(self, method_name: str, kwargs: Mapping[str...
method run (line 58) | async def run(
method run_streamed (line 68) | def run_streamed(
function responses_websocket_session (line 80) | async def responses_websocket_session(
FILE: src/agents/result.py
class AgentToolInvocation (line 55) | class AgentToolInvocation:
function _populate_state_from_result (line 68) | def _populate_state_from_result(
function _input_items_for_result (line 116) | def _input_items_for_result(
class RunResultBase (line 148) | class RunResultBase(abc.ABC):
method __get_pydantic_core_schema__ (line 190) | def __get_pydantic_core_schema__(
method last_agent (line 201) | def last_agent(self) -> Agent[Any]:
method release_agents (line 204) | def release_agents(self, *, release_new_items: bool = True) -> None:
method __del__ (line 218) | def __del__(self) -> None:
method _release_last_agent_reference (line 230) | def _release_last_agent_reference(self) -> None:
method final_output_as (line 233) | def final_output_as(self, cls: type[T], raise_if_incorrect_type: bool ...
method to_input_list (line 251) | def to_input_list(
method agent_tool_invocation (line 272) | def agent_tool_invocation(self) -> AgentToolInvocation | None:
method last_response_id (line 289) | def last_response_id(self) -> str | None:
class RunResult (line 298) | class RunResult(RunResultBase):
method __post_init__ (line 334) | def __post_init__(self) -> None:
method last_agent (line 338) | def last_agent(self) -> Agent[Any]:
method _release_last_agent_reference (line 349) | def _release_last_agent_reference(self) -> None:
method to_state (line 357) | def to_state(self) -> RunState[Any]:
method __str__ (line 404) | def __str__(self) -> str:
class RunResultStreaming (line 409) | class RunResultStreaming(RunResultBase):
method __post_init__ (line 497) | def __post_init__(self, _run_impl_task: asyncio.Task[Any] | None) -> N...
method last_agent (line 507) | def last_agent(self) -> Agent[Any]:
method _release_last_agent_reference (line 520) | def _release_last_agent_reference(self) -> None:
method cancel (line 528) | def cancel(self, mode: Literal["immediate", "after_turn"] = "immediate...
method stream_events (line 576) | async def stream_events(self) -> AsyncIterator[StreamEvent]:
method _create_error_details (line 647) | def _create_error_details(self) -> RunErrorDetails:
method _check_errors (line 659) | def _check_errors(self):
method _cleanup_tasks (line 701) | def _cleanup_tasks(self):
method __str__ (line 711) | def __str__(self) -> str:
method _await_task_safely (line 714) | async def _await_task_safely(self, task: asyncio.Task[Any] | None) -> ...
method _drain_event_queue (line 730) | def _drain_event_queue(self) -> None:
method _drain_input_guardrail_queue (line 742) | def _drain_input_guardrail_queue(self) -> None:
method to_state (line 750) | def to_state(self) -> RunState[Any]:
FILE: src/agents/retry.py
class ModelRetryBackoffSettings (line 17) | class ModelRetryBackoffSettings:
method to_json_dict (line 32) | def to_json_dict(self) -> dict[str, Any]:
function _coerce_backoff_settings (line 39) | def _coerce_backoff_settings(
class ModelRetryNormalizedError (line 51) | class ModelRetryNormalizedError:
method __init__ (line 63) | def __init__(
class ModelRetryAdvice (line 94) | class ModelRetryAdvice:
class ModelRetryAdviceRequest (line 105) | class ModelRetryAdviceRequest:
class RetryDecision (line 116) | class RetryDecision:
class RetryPolicyContext (line 127) | class RetryPolicyContext:
function _mark_retry_capabilities (line 143) | def _mark_retry_capabilities(
function retry_policy_retries_safe_transport_errors (line 154) | def retry_policy_retries_safe_transport_errors(policy: RetryPolicy | Non...
function retry_policy_retries_all_transient_errors (line 158) | def retry_policy_retries_all_transient_errors(policy: RetryPolicy | None...
class ModelRetrySettings (line 163) | class ModelRetrySettings:
method __post_init__ (line 175) | def __post_init__(self) -> None:
method to_json_dict (line 178) | def to_json_dict(self) -> dict[str, Any]:
function _coerce_decision (line 186) | def _coerce_decision(value: bool | RetryDecision) -> RetryDecision:
function _evaluate_policy (line 192) | async def _evaluate_policy(
function _with_hard_veto (line 202) | def _with_hard_veto(decision: RetryDecision) -> RetryDecision:
function _with_replay_safe_approval (line 207) | def _with_replay_safe_approval(decision: RetryDecision) -> RetryDecision:
function _merge_positive_retry_decisions (line 212) | def _merge_positive_retry_decisions(
class _RetryPolicies (line 232) | class _RetryPolicies:
method never (line 233) | def never(self) -> RetryPolicy:
method provider_suggested (line 243) | def provider_suggested(self) -> RetryPolicy:
method network_error (line 261) | def network_error(self) -> RetryPolicy:
method retry_after (line 271) | def retry_after(self) -> RetryPolicy:
method http_status (line 286) | def http_status(self, statuses: Iterable[int]) -> RetryPolicy:
method all (line 299) | def all(self, *policies: RetryPolicy) -> RetryPolicy:
method any (line 330) | def any(self, *policies: RetryPolicy) -> RetryPolicy:
FILE: src/agents/run.py
function set_default_agent_runner (line 138) | def set_default_agent_runner(runner: AgentRunner | None) -> None:
function get_default_agent_runner (line 147) | def get_default_agent_runner() -> AgentRunner:
class Runner (line 156) | class Runner:
method run (line 158) | async def run(
method run_sync (line 238) | def run_sync(
method run_streamed (line 316) | def run_streamed(
class AgentRunner (line 391) | class AgentRunner:
method run (line 397) | async def run(
method run_sync (line 1346) | def run_sync(
method run_streamed (line 1429) | def run_streamed(
FILE: src/agents/run_config.py
function _default_trace_include_sensitive_data (line 30) | def _default_trace_include_sensitive_data() -> bool:
class ModelInputData (line 37) | class ModelInputData:
class CallModelData (line 45) | class CallModelData(Generic[TContext]):
class ToolErrorFormatterArgs (line 58) | class ToolErrorFormatterArgs(Generic[TContext]):
class RunConfig (line 84) | class RunConfig:
class RunOptions (line 195) | class RunOptions(TypedDict, Generic[TContext]):
FILE: src/agents/run_context.py
class _ApprovalRecord (line 29) | class _ApprovalRecord:
class RunContextWrapper (line 43) | class RunContextWrapper(Generic[TContext]):
method _to_str_or_none (line 65) | def _to_str_or_none(value: Any) -> str | None:
method _resolve_tool_name (line 76) | def _resolve_tool_name(approval_item: ToolApprovalItem) -> str:
method _resolve_tool_namespace (line 88) | def _resolve_tool_namespace(approval_item: ToolApprovalItem) -> str | ...
method _resolve_approval_key (line 99) | def _resolve_approval_key(approval_item: ToolApprovalItem) -> str:
method _resolve_approval_keys (line 114) | def _resolve_approval_keys(approval_item: ToolApprovalItem) -> tuple[s...
method _resolve_tool_lookup_key (line 126) | def _resolve_tool_lookup_key(approval_item: ToolApprovalItem) -> Funct...
method _resolve_call_id (line 146) | def _resolve_call_id(approval_item: ToolApprovalItem) -> str | None:
method _get_or_create_approval_entry (line 170) | def _get_or_create_approval_entry(self, tool_name: str) -> _ApprovalRe...
method is_tool_approved (line 177) | def is_tool_approved(self, tool_name: str, call_id: str) -> bool | None:
method _get_approval_status_for_key (line 181) | def _get_approval_status_for_key(self, approval_key: str, call_id: str...
method _clear_rejection_message (line 213) | def _clear_rejection_message(record: _ApprovalRecord, call_id: str | N...
method _get_rejection_message_for_key (line 219) | def _get_rejection_message_for_key(record: _ApprovalRecord, call_id: s...
method get_rejection_message (line 228) | def get_rejection_message(
method _apply_approval_decision (line 300) | def _apply_approval_decision(
method approve_tool (line 346) | def approve_tool(self, approval_item: ToolApprovalItem, always_approve...
method reject_tool (line 354) | def reject_tool(
method get_approval_status (line 368) | def get_approval_status(
method _rebuild_approvals (line 438) | def _rebuild_approvals(self, approvals: dict[str, dict[str, Any]]) -> ...
method _fork_with_tool_input (line 457) | def _fork_with_tool_input(self, tool_input: Any) -> RunContextWrapper[...
method _fork_without_tool_input (line 466) | def _fork_without_tool_input(self) -> RunContextWrapper[TContext]:
class AgentHookContext (line 476) | class AgentHookContext(RunContextWrapper[TContext]):
FILE: src/agents/run_error_handlers.py
class RunErrorData (line 16) | class RunErrorData:
class RunErrorHandlerInput (line 28) | class RunErrorHandlerInput(Generic[TContext]):
class RunErrorHandlerResult (line 35) | class RunErrorHandlerResult:
class RunErrorHandlers (line 49) | class RunErrorHandlers(TypedDict, Generic[TContext], total=False):
FILE: src/agents/run_internal/_asyncio_progress.py
function _get_awaitable_to_wait_on (line 18) | def _get_awaitable_to_wait_on(awaitable: Any) -> Any | None:
function _get_sleep_deadline_from_awaitable (line 29) | def _get_sleep_deadline_from_awaitable(
function _get_scheduled_future_deadline (line 59) | def _get_scheduled_future_deadline(
function _iter_shielded_future_child_tasks (line 83) | def _iter_shielded_future_child_tasks(future: asyncio.Future[Any]) -> tu...
function _iter_future_child_tasks (line 97) | def _iter_future_child_tasks(future: asyncio.Future[Any]) -> tuple[async...
function _get_self_progress_deadline_for_future (line 107) | def _get_self_progress_deadline_for_future(
function _get_self_progress_deadline_for_awaitable (line 151) | def _get_self_progress_deadline_for_awaitable(
function get_function_tool_task_progress_deadline (line 179) | def get_function_tool_task_progress_deadline(
FILE: src/agents/run_internal/agent_runner_helpers.py
function should_cancel_parallel_model_task_on_input_guardrail_trip (line 56) | def should_cancel_parallel_model_task_on_input_guardrail_trip() -> bool:
function apply_resumed_conversation_settings (line 72) | def apply_resumed_conversation_settings(
function validate_session_conversation_settings (line 90) | def validate_session_conversation_settings(
function resolve_trace_settings (line 107) | def resolve_trace_settings(
function resolve_resumed_context (line 137) | def resolve_resumed_context(
function ensure_context_wrapper (line 154) | def ensure_context_wrapper(
function describe_run_state_step (line 163) | def describe_run_state_step(step: object | None) -> str | int | None:
function build_generated_items_details (line 178) | def build_generated_items_details(
function build_resumed_stream_debug_extra (line 198) | def build_resumed_stream_debug_extra(
function finalize_conversation_tracking (line 217) | def finalize_conversation_tracking(
function build_interruption_result (line 236) | def build_interruption_result(
function append_model_response_if_new (line 282) | def append_model_response_if_new(
function input_guardrails_triggered (line 291) | def input_guardrails_triggered(results: list[InputGuardrailResult]) -> b...
function update_run_state_for_interruption (line 296) | def update_run_state_for_interruption(
function save_turn_items_if_needed (line 316) | async def save_turn_items_if_needed(
function resolve_processed_response (line 343) | def resolve_processed_response(
FILE: src/agents/run_internal/approvals.py
function append_approval_error_output (line 23) | def append_approval_error_output(
function filter_tool_approvals (line 43) | def filter_tool_approvals(interruptions: Sequence[Any]) -> list[ToolAppr...
function approvals_from_step (line 48) | def approvals_from_step(step: Any) -> list[ToolApprovalItem]:
function append_input_items_excluding_approvals (line 56) | def append_input_items_excluding_approvals(
function _build_function_tool_call_for_approval_error (line 74) | def _build_function_tool_call_for_approval_error(
FILE: src/agents/run_internal/error_handlers.py
function build_run_error_data (line 31) | def build_run_error_data(
function format_final_output_text (line 57) | def format_final_output_text(agent: Agent[Any], final_output: Any) -> str:
function validate_handler_final_output (line 80) | def validate_handler_final_output(agent: Agent[Any], final_output: Any) ...
function create_message_output_item (line 110) | def create_message_output_item(agent: Agent[Any], output_text: str) -> M...
function resolve_run_error_handler_result (line 128) | async def resolve_run_error_handler_result(
FILE: src/agents/run_internal/guardrails.py
function run_single_input_guardrail (line 30) | async def run_single_input_guardrail(
function run_single_output_guardrail (line 42) | async def run_single_output_guardrail(
function run_input_guardrails_with_queue (line 54) | async def run_input_guardrails_with_queue(
function run_input_guardrails (line 102) | async def run_input_guardrails(
function run_output_guardrails (line 137) | async def run_output_guardrails(
function input_guardrail_tripwire_triggered_for_stream (line 171) | async def input_guardrail_tripwire_triggered_for_stream(
FILE: src/agents/run_internal/items.py
function copy_input_items (line 55) | def copy_input_items(value: str | list[TResponseInputItem]) -> str | lis...
function run_item_to_input_item (line 60) | def run_item_to_input_item(
function run_items_to_input_items (line 77) | def run_items_to_input_items(
function drop_orphan_function_calls (line 90) | def drop_orphan_function_calls(
function ensure_input_item_format (line 132) | def ensure_input_item_format(item: TResponseInputItem) -> TResponseInput...
function normalize_input_items_for_api (line 141) | def normalize_input_items_for_api(items: list[TResponseInputItem]) -> li...
function prepare_model_input_items (line 156) | def prepare_model_input_items(
function normalize_resumed_input (line 170) | def normalize_resumed_input(
function fingerprint_input_item (line 180) | def fingerprint_input_item(item: Any, *, ignore_ids_for_matching: bool =...
function _dedupe_key (line 205) | def _dedupe_key(item: TResponseInputItem) -> str | None:
function _should_omit_reasoning_item_ids (line 234) | def _should_omit_reasoning_item_ids(reasoning_item_id_policy: ReasoningI...
function _without_reasoning_item_id (line 238) | def _without_reasoning_item_id(item: TResponseInputItem) -> TResponseInp...
function deduplicate_input_items (line 250) | def deduplicate_input_items(items: Sequence[TResponseInputItem]) -> list...
function deduplicate_input_items_preferring_latest (line 266) | def deduplicate_input_items_preferring_latest(
function function_rejection_item (line 275) | def function_rejection_item(
function shell_rejection_item (line 292) | def shell_rejection_item(
function apply_patch_rejection_item (line 312) | def apply_patch_rejection_item(
function extract_mcp_request_id (line 332) | def extract_mcp_request_id(raw_item: Any) -> str | None:
function extract_mcp_request_id_from_run (line 357) | def extract_mcp_request_id_from_run(mcp_run: Any) -> str | None:
function _completed_call_ids_by_type (line 382) | def _completed_call_ids_by_type(payload: list[TResponseInputItem]) -> di...
function _matched_anonymous_tool_search_call_indexes (line 399) | def _matched_anonymous_tool_search_call_indexes(payload: list[TResponseI...
function _coerce_to_dict (line 425) | def _coerce_to_dict(value: object) -> dict[str, Any] | None:
function _model_dump_without_warnings (line 436) | def _model_dump_without_warnings(value: object) -> dict[str, Any] | None:
FILE: src/agents/run_internal/model_retry.py
function _iter_error_chain (line 47) | def _iter_error_chain(error: Exception) -> Iterator[Exception]:
function _is_conversation_locked_error (line 57) | def _is_conversation_locked_error(error: Exception) -> bool:
function _get_header_value (line 63) | def _get_header_value(headers: Any, key: str) -> str | None:
function _extract_headers (line 75) | def _extract_headers(error: Exception) -> httpx.Headers | Mapping[str, s...
function _parse_retry_after (line 89) | def _parse_retry_after(headers: httpx.Headers | Mapping[str, str] | None...
function _get_status_code (line 121) | def _get_status_code(error: Exception) -> int | None:
function _get_error_code (line 134) | def _get_error_code(error: Exception) -> str | None:
function _get_request_id (line 153) | def _get_request_id(error: Exception) -> str | None:
function _is_abort_like_error (line 161) | def _is_abort_like_error(error: Exception) -> bool:
function _is_network_like_error (line 174) | def _is_network_like_error(error: Exception) -> bool:
function _normalize_retry_error (line 205) | def _normalize_retry_error(
function _coerce_retry_decision (line 245) | def _coerce_retry_decision(value: bool | RetryDecision) -> RetryDecision:
function _call_retry_policy (line 251) | async def _call_retry_policy(
function _default_retry_delay (line 261) | def _default_retry_delay(
function _sleep_for_retry (line 293) | async def _sleep_for_retry(delay: float) -> None:
function _build_zero_request_usage_entry (line 299) | def _build_zero_request_usage_entry() -> RequestUsage:
function _build_request_usage_entry_from_usage (line 309) | def _build_request_usage_entry_from_usage(usage: Usage) -> RequestUsage:
function apply_retry_attempt_usage (line 319) | def apply_retry_attempt_usage(usage: Usage, failed_attempts: int) -> Usage:
function _close_async_iterator (line 334) | async def _close_async_iterator(iterator: Any) -> None:
function _close_async_iterator_quietly (line 347) | async def _close_async_iterator_quietly(iterator: Any | None) -> None:
function _get_stream_event_type (line 357) | def _get_stream_event_type(event: TResponseStreamEvent) -> str | None:
function _stream_event_blocks_retry (line 365) | def _stream_event_blocks_retry(event: TResponseStreamEvent) -> bool:
function _evaluate_retry (line 370) | async def _evaluate_retry(
function _is_stateful_request (line 436) | def _is_stateful_request(
function _should_preserve_conversation_locked_compatibility (line 444) | def _should_preserve_conversation_locked_compatibility(
function _should_disable_provider_managed_retries (line 456) | def _should_disable_provider_managed_retries(
function _should_disable_websocket_pre_event_retry (line 494) | def _should_disable_websocket_pre_event_retry(
function get_response_with_retry (line 511) | async def get_response_with_retry(
function stream_response_with_retry (line 610) | async def stream_response_with_retry(
FILE: src/agents/run_internal/oai_conversation.py
function _normalize_server_item_id (line 35) | def _normalize_server_item_id(value: Any) -> str | None:
function _fingerprint_for_tracker (line 43) | def _fingerprint_for_tracker(item: Any) -> str | None:
function _anonymous_tool_search_fingerprint (line 67) | def _anonymous_tool_search_fingerprint(item: Any) -> str | None:
function _is_tool_search_item (line 81) | def _is_tool_search_item(item: Any) -> bool:
class OpenAIServerConversationTracker (line 88) | class OpenAIServerConversationTracker:
method __post_init__ (line 129) | def __post_init__(self):
method hydrate_from_state (line 137) | def hydrate_from_state(
method track_server_items (line 294) | def track_server_items(self, model_response: ModelResponse | None) -> ...
method mark_input_as_sent (line 343) | def mark_input_as_sent(self, items: Sequence[TResponseInputItem]) -> N...
method rewind_input (line 378) | def rewind_input(self, items: Sequence[TResponseInputItem]) -> None:
method prepare_input (line 401) | def prepare_input(
method _register_prepared_item_source (line 501) | def _register_prepared_item_source(
method _resolve_prepared_item_source (line 513) | def _resolve_prepared_item_source(self, item: TResponseInputItem) -> T...
method _consume_prepared_item_source (line 527) | def _consume_prepared_item_source(self, item: TResponseInputItem) -> T...
FILE: src/agents/run_internal/run_loop.py
function _should_persist_stream_items (line 233) | async def _should_persist_stream_items(
function _prepare_turn_input_items (line 245) | def _prepare_turn_input_items(
function _complete_stream_interruption (line 255) | def _complete_stream_interruption(
function _save_resumed_stream_items (line 267) | async def _save_resumed_stream_items(
function _save_stream_items (line 297) | async def _save_stream_items(
function _run_output_guardrails_for_stream (line 328) | async def _run_output_guardrails_for_stream(
function _finalize_streamed_final_output (line 351) | async def _finalize_streamed_final_output(
function _finalize_streamed_interruption (line 379) | async def _finalize_streamed_interruption(
function start_streaming (line 400) | async def start_streaming(
function run_single_turn_streamed (line 1087) | async def run_single_turn_streamed(
function run_single_turn (line 1481) | async def run_single_turn(
function get_new_response (line 1564) | async def get_new_response(
FILE: src/agents/run_internal/run_steps.py
class QueueCompleteSentinel (line 49) | class QueueCompleteSentinel:
class ToolRunHandoff (line 59) | class ToolRunHandoff:
class ToolRunFunction (line 65) | class ToolRunFunction:
class ToolRunComputerAction (line 71) | class ToolRunComputerAction:
class ToolRunMCPApprovalRequest (line 77) | class ToolRunMCPApprovalRequest:
class ToolRunLocalShellCall (line 83) | class ToolRunLocalShellCall:
class ToolRunShellCall (line 89) | class ToolRunShellCall:
class ToolRunApplyPatchCall (line 95) | class ToolRunApplyPatchCall:
class ProcessedResponse (line 101) | class ProcessedResponse:
method has_tools_or_approvals_to_run (line 113) | def has_tools_or_approvals_to_run(self) -> bool:
method has_interruptions (line 128) | def has_interruptions(self) -> bool:
class NextStepHandoff (line 134) | class NextStepHandoff:
class NextStepFinalOutput (line 139) | class NextStepFinalOutput:
class NextStepRunAgain (line 144) | class NextStepRunAgain:
class NextStepInterruption (line 149) | class NextStepInterruption:
class SingleStepResult (line 157) | class SingleStepResult:
method generated_items (line 191) | def generated_items(self) -> list[RunItem]:
FILE: src/agents/run_internal/session_persistence.py
function prepare_input_with_session (line 53) | async def prepare_input_with_session(
function persist_session_items_for_guardrail_trip (line 170) | async def persist_session_items_for_guardrail_trip(
function session_items_for_turn (line 195) | def session_items_for_turn(turn_result: SingleStepResult) -> list[RunItem]:
function resumed_turn_items (line 205) | def resumed_turn_items(turn_result: SingleStepResult) -> tuple[list[RunI...
function update_run_state_after_resume (line 212) | def update_run_state_after_resume(
function save_result_to_session (line 227) | async def save_result_to_session(
function save_resumed_turn_items (line 362) | async def save_resumed_turn_items(
function rewind_session_items (line 386) | async def rewind_session_items(
function wait_for_session_cleanup (line 508) | async def wait_for_session_cleanup(
function _ignore_ids_for_matching (line 555) | def _ignore_ids_for_matching(session: Session) -> bool:
function _sanitize_openai_conversation_item (line 562) | def _sanitize_openai_conversation_item(item: TResponseInputItem) -> TRes...
function _fingerprint_or_repr (line 572) | def _fingerprint_or_repr(item: TResponseInputItem, *, ignore_ids_for_mat...
function _session_item_key (line 579) | def _session_item_key(item: Any) -> str:
function _build_reference_map (line 593) | def _build_reference_map(items: Sequence[Any]) -> dict[str, list[Any]]:
function _consume_reference (line 602) | def _consume_reference(ref_map: dict[str, list[Any]], key: str, candidat...
function _build_frequency_map (line 616) | def _build_frequency_map(items: Sequence[Any]) -> dict[str, int]:
FILE: src/agents/run_internal/streaming.py
function stream_step_items_to_queue (line 27) | def stream_step_items_to_queue(
function stream_step_result_to_queue (line 65) | def stream_step_result_to_queue(
FILE: src/agents/run_internal/tool_actions.py
function _serialize_trace_payload (line 73) | def _serialize_trace_payload(payload: Any) -> str:
class ComputerAction (line 89) | class ComputerAction:
method execute (line 96) | async def execute(
method _execute_action_and_capture (line 183) | async def _execute_action_and_capture(
method _iter_actions (line 260) | def _iter_actions(tool_call: ResponseComputerToolCall) -> list[Any]:
method _get_trace_input_payload (line 270) | def _get_trace_input_payload(cls, tool_call: ResponseComputerToolCall)...
method _serialize_action_payload (line 279) | def _serialize_action_payload(action: Any) -> Any:
class LocalShellAction (line 289) | class LocalShellAction:
method execute (line 293) | async def execute(
class ShellAction (line 341) | class ShellAction:
method execute (line 345) | async def execute(
class ApplyPatchAction (line 523) | class ApplyPatchAction:
method execute (line 527) | async def execute(
FILE: src/agents/run_internal/tool_execution.py
class _FunctionToolFailure (line 164) | class _FunctionToolFailure:
class _FunctionToolTaskState (line 173) | class _FunctionToolTaskState:
function _background_cleanup_task_exception_message (line 182) | def _background_cleanup_task_exception_message(exc: BaseException) -> st...
function _background_post_invoke_task_exception_message (line 194) | def _background_post_invoke_task_exception_message(exc: BaseException) -...
function _parent_cancelled_task_exception_message (line 200) | def _parent_cancelled_task_exception_message(exc: BaseException) -> str ...
function _consume_function_tool_task_result (line 207) | def _consume_function_tool_task_result(
function _get_function_tool_failure_priority (line 233) | def _get_function_tool_failure_priority(error: BaseException) -> int:
function _select_function_tool_failure (line 242) | def _select_function_tool_failure(
function _merge_late_function_tool_failure (line 261) | def _merge_late_function_tool_failure(
function _cancel_function_tool_tasks (line 282) | def _cancel_function_tool_tasks(tasks: set[asyncio.Task[Any]]) -> None:
function _attach_function_tool_task_result_callbacks (line 288) | def _attach_function_tool_task_result_callbacks(
function _record_completed_function_tool_tasks (line 302) | def _record_completed_function_tool_tasks(
function _collect_settled_function_tool_tasks (line 334) | def _collect_settled_function_tool_tasks(
function _wait_for_cancelled_function_tool_task_progress (line 357) | async def _wait_for_cancelled_function_tool_task_progress(
function _wait_for_function_tool_task_completion (line 398) | async def _wait_for_function_tool_task_completion(
function _settle_pending_function_tool_tasks (line 412) | async def _settle_pending_function_tool_tasks(
function _drain_cancelled_function_tool_tasks (line 462) | async def _drain_cancelled_function_tool_tasks(
function _wait_pending_function_tool_tasks_for_timeout (line 489) | async def _wait_pending_function_tool_tasks_for_timeout(
function maybe_reset_tool_choice (line 513) | def maybe_reset_tool_choice(
function resolve_enabled_function_tools (line 524) | async def resolve_enabled_function_tools(
function initialize_computer_tools (line 547) | async def initialize_computer_tools(
function get_mapping_or_attr (line 562) | def get_mapping_or_attr(target: Any, key: str) -> Any:
function extract_tool_call_id (line 569) | def extract_tool_call_id(raw: Any) -> str | None:
function extract_shell_call_id (line 581) | def extract_shell_call_id(tool_call: Any) -> str:
function coerce_shell_call (line 589) | def coerce_shell_call(tool_call: Any) -> ShellCallData:
function _parse_apply_patch_json (line 637) | def _parse_apply_patch_json(payload: str, *, label: str) -> dict[str, Any]:
function parse_apply_patch_custom_input (line 648) | def parse_apply_patch_custom_input(input_json: str) -> dict[str, Any]:
function parse_apply_patch_function_args (line 653) | def parse_apply_patch_function_args(arguments: str) -> dict[str, Any]:
function extract_apply_patch_call_id (line 658) | def extract_apply_patch_call_id(tool_call: Any) -> str:
function coerce_apply_patch_operation (line 666) | def coerce_apply_patch_operation(
function normalize_apply_patch_result (line 701) | def normalize_apply_patch_result(
function is_apply_patch_name (line 720) | def is_apply_patch_name(name: str | None, tool: ApplyPatchTool | None) -...
function normalize_shell_output (line 732) | def normalize_shell_output(entry: ShellCommandOutput | Mapping[str, Any]...
function serialize_shell_output (line 781) | def serialize_shell_output(output: ShellCommandOutput) -> dict[str, Any]:
function resolve_exit_code (line 800) | def resolve_exit_code(raw_exit_code: Any, outcome_status: str | None) ->...
function render_shell_outputs (line 814) | def render_shell_outputs(outputs: Sequence[ShellCommandOutput]) -> str:
function truncate_shell_outputs (line 847) | def truncate_shell_outputs(
function normalize_shell_output_entries (line 887) | def normalize_shell_output_entries(
function normalize_max_output_length (line 922) | def normalize_max_output_length(value: int | None) -> int | None:
function format_shell_error (line 929) | def format_shell_error(error: Exception | BaseException | Any) -> str:
function get_trace_tool_error (line 940) | def get_trace_tool_error(*, trace_include_sensitive_data: bool, error_me...
function with_tool_function_span (line 945) | async def with_tool_function_span(
function build_litellm_json_tool_call (line 967) | def build_litellm_json_tool_call(output: ResponseFunctionToolCall) -> Fu...
function resolve_approval_status (line 986) | async def resolve_approval_status(
function resolve_approval_interruption (line 1031) | def resolve_approval_interruption(
function resolve_approval_rejection_message (line 1045) | async def resolve_approval_rejection_message(
function function_needs_approval (line 1101) | async def function_needs_approval(
function process_hosted_mcp_approvals (line 1122) | def process_hosted_mcp_approvals(
function collect_manual_mcp_approvals (line 1185) | def collect_manual_mcp_approvals(
function index_approval_items_by_call_id (line 1247) | def index_approval_items_by_call_id(items: Sequence[RunItem]) -> dict[st...
function should_keep_hosted_mcp_item (line 1259) | def should_keep_hosted_mcp_item(
class _FunctionToolBatchExecutor (line 1276) | class _FunctionToolBatchExecutor:
method __init__ (line 1279) | def __init__(
method execute (line 1307) | async def execute(
method _create_tool_task (line 1336) | def _create_tool_task(self, tool_run: ToolRunFunction, order: int) -> ...
method _drain_pending_tasks (line 1348) | async def _drain_pending_tasks(self) -> None:
method _raise_failure_after_draining_siblings (line 1362) | async def _raise_failure_after_draining_siblings(
method _partition_pending_tasks (line 1393) | def _partition_pending_tasks(self) -> tuple[set[asyncio.Task[Any]], se...
method _drain_cancelled_tasks (line 1399) | async def _drain_cancelled_tasks(
method _wait_post_invoke_tasks (line 1414) | async def _wait_post_invoke_tasks(
method _cancel_pending_tasks_for_parent_cancellation (line 1429) | def _cancel_pending_tasks_for_parent_cancellation(self) -> None:
method _run_single_tool (line 1437) | async def _run_single_tool(
method _maybe_execute_tool_approval (line 1506) | async def _maybe_execute_tool_approval(
method _execute_single_tool_body (line 1583) | async def _execute_single_tool_body(
method _invoke_tool_and_run_post_invoke (line 1624) | async def _invoke_tool_and_run_post_invoke(
method _await_invoke_task (line 1680) | async def _await_invoke_task(
method _get_nested_tool_interruptions (line 1717) | def _get_nested_tool_interruptions(
method _consume_nested_tool_run_result (line 1726) | def _consume_nested_tool_run_result(
method _resolve_nested_tool_run_result (line 1737) | def _resolve_nested_tool_run_result(
method _build_function_tool_results (line 1751) | def _build_function_tool_results(self) -> list[FunctionToolResult]:
function execute_function_tool_calls (line 1793) | async def execute_function_tool_calls(
function execute_local_shell_calls (line 1815) | async def execute_local_shell_calls(
function execute_shell_calls (line 1840) | async def execute_shell_calls(
function execute_apply_patch_calls (line 1865) | async def execute_apply_patch_calls(
function execute_computer_actions (line 1890) | async def execute_computer_actions(
function execute_approved_tools (line 1940) | async def execute_approved_tools(
function _execute_tool_input_guardrails (line 2108) | async def _execute_tool_input_guardrails(
function _execute_tool_output_guardrails (line 2142) | async def _execute_tool_output_guardrails(
function _normalize_exit_code (line 2180) | def _normalize_exit_code(value: Any) -> int | None:
function _is_hosted_mcp_approval_request (line 2190) | def _is_hosted_mcp_approval_request(raw_item: Any) -> bool:
FILE: src/agents/run_internal/tool_planning.py
function _hashable_identity_value (line 66) | def _hashable_identity_value(value: Any) -> Hashable | None:
function _tool_call_identity (line 80) | def _tool_call_identity(raw: Any) -> tuple[str | None, str | None, Hasha...
function execute_mcp_approval_requests (line 92) | async def execute_mcp_approval_requests(
function _build_tool_output_index (line 133) | def _build_tool_output_index(items: Sequence[RunItem]) -> set[tuple[str,...
function _dedupe_tool_call_items (line 151) | def _dedupe_tool_call_items(
class ToolExecutionPlan (line 171) | class ToolExecutionPlan:
method has_interruptions (line 184) | def has_interruptions(self) -> bool:
function _partition_mcp_approval_requests (line 188) | def _partition_mcp_approval_requests(
function _collect_mcp_approval_plan (line 202) | def _collect_mcp_approval_plan(
function _build_plan_for_fresh_turn (line 228) | def _build_plan_for_fresh_turn(
function _build_plan_for_resume_turn (line 257) | def _build_plan_for_resume_turn(
function _collect_tool_interruptions (line 291) | def _collect_tool_interruptions(
function _build_tool_result_items (line 319) | def _build_tool_result_items(
function _make_unique_item_appender (line 341) | def _make_unique_item_appender(
function _collect_runs_by_approval (line 359) | async def _collect_runs_by_approval(
function _apply_manual_mcp_approvals (line 427) | def _apply_manual_mcp_approvals(
function _append_mcp_callback_results (line 448) | async def _append_mcp_callback_results(
function _select_function_tool_runs_for_resume (line 467) | async def _select_function_tool_runs_for_resume(
function _execute_tool_plan (line 518) | async def _execute_tool_plan(
FILE: src/agents/run_internal/tool_use_tracker.py
class AgentToolUseTracker (line 46) | class AgentToolUseTracker:
method __init__ (line 49) | def __init__(self) -> None:
method record_used_tools (line 55) | def record_used_tools(self, agent: Agent[Any], tools: list[ToolRunFunc...
method record_processed_response (line 62) | def record_processed_response(
method add_tool_use (line 79) | def add_tool_use(self, agent: Agent[Any], tool_names: list[str]) -> None:
method has_used_tools (line 94) | def has_used_tools(self, agent: Agent[Any]) -> bool:
method as_serializable (line 98) | def as_serializable(self) -> dict[str, list[str]]:
method from_serializable (line 109) | def from_serializable(cls, data: dict[str, list[str]]) -> AgentToolUse...
function serialize_tool_use_tracker (line 115) | def serialize_tool_use_tracker(tool_use_tracker: AgentToolUseTracker) ->...
function hydrate_tool_use_tracker (line 123) | def hydrate_tool_use_tracker(
function get_tool_call_types (line 141) | def get_tool_call_types() -> tuple[type, ...]:
FILE: src/agents/run_internal/turn_preparation.py
function validate_run_hooks (line 30) | def validate_run_hooks(
function maybe_filter_model_input (line 48) | async def maybe_filter_model_input(
function get_handoffs (line 85) | async def get_handoffs(agent: Agent[Any], context_wrapper: RunContextWra...
function get_all_tools (line 108) | async def get_all_tools(agent: Agent[Any], context_wrapper: RunContextWr...
function get_output_schema (line 113) | def get_output_schema(agent: Agent[Any]) -> AgentOutputSchemaBase | None:
function get_model (line 123) | def get_model(agent: Agent[Any], run_config: RunConfig) -> Model:
FILE: src/agents/run_internal/turn_resolution.py
function _maybe_finalize_from_tool_results (line 156) | async def _maybe_finalize_from_tool_results(
function run_final_output_hooks (line 198) | async def run_final_output_hooks(
function execute_final_output_step (line 219) | async def execute_final_output_step(
function execute_final_output (line 252) | async def execute_final_output(
function execute_handoffs (line 285) | async def execute_handoffs(
function check_for_final_output_from_tools (line 464) | async def check_for_final_output_from_tools(
function execute_tools_and_side_effects (line 499) | async def execute_tools_and_side_effects(
function resolve_interrupted_turn (line 665) | async def resolve_interrupted_turn(
function process_model_response (line 1273) | def process_model_response(
function get_single_step_result_from_response (line 1685) | async def get_single_step_result_from_response(
FILE: src/agents/run_state.py
class RunState (line 139) | class RunState(Generic[TContext, TAgent]):
method __init__ (line 223) | def __init__(
method get_interruptions (line 261) | def get_interruptions(self) -> list[ToolApprovalItem]:
method approve (line 270) | def approve(self, approval_item: ToolApprovalItem, always_approve: boo...
method reject (line 276) | def reject(
method _serialize_approvals (line 297) | def _serialize_approvals(self) -> dict[str, dict[str, Any]]:
method _serialize_model_responses (line 319) | def _serialize_model_responses(self) -> list[dict[str, Any]]:
method _serialize_original_input (line 331) | def _serialize_original_input(self) -> str | list[Any]:
method _serialize_context_payload (line 351) | def _serialize_context_payload(
method _serialize_tool_input (line 476) | def _serialize_tool_input(self, tool_input: Any) -> Any:
method _current_generated_items_merge_marker (line 493) | def _current_generated_items_merge_marker(self) -> str | None:
method _mark_generated_items_merged_with_last_processed (line 514) | def _mark_generated_items_merged_with_last_processed(self) -> None:
method _clear_generated_items_last_processed_marker (line 518) | def _clear_generated_items_last_processed_marker(self) -> None:
method _merge_generated_items_with_processed (line 522) | def _merge_generated_items_with_processed(self) -> list[RunItem]:
method to_json (line 589) | def to_json(
method _serialize_processed_response (line 684) | def _serialize_processed_response(
method _serialize_current_step (line 725) | def _serialize_current_step(self) -> dict[str, Any] | None:
method _serialize_item (line 748) | def _serialize_item(self, item: RunItem) -> dict[str, Any]:
method _lookup_function_name (line 790) | def _lookup_function_name(self, call_id: str) -> str:
method to_string (line 839) | def to_string(
method set_trace (line 863) | def set_trace(self, trace: Trace | None) -> None:
method _serialize_trace_data (line 867) | def _serialize_trace_data(self, *, include_tracing_api_key: bool) -> d...
method set_tool_use_tracker_snapshot (line 872) | def set_tool_use_tracker_snapshot(self, snapshot: Mapping[str, Sequenc...
method set_reasoning_item_id_policy (line 885) | def set_reasoning_item_id_policy(self, policy: Literal["preserve", "om...
method get_tool_use_tracker_snapshot (line 889) | def get_tool_use_tracker_snapshot(self) -> dict[str, list[str]]:
method from_string (line 897) | async def from_string(
method from_json (line 938) | async def from_json(
function _get_attr (line 979) | def _get_attr(obj: Any, attr: str, default: Any = None) -> Any:
function _describe_context_type (line 984) | def _describe_context_type(value: Any) -> str:
function _context_class_path (line 997) | def _context_class_path(value: Any) -> str | None:
function _build_context_meta (line 1009) | def _build_context_meta(
function _context_meta_requires_deserializer (line 1031) | def _context_meta_requires_deserializer(context_meta: Mapping[str, Any] ...
function _context_meta_warning_message (line 1040) | def _context_meta_warning_message(context_meta: Mapping[str, Any] | N
Condensed preview — 870 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (7,067K chars).
[
{
"path": ".agents/skills/code-change-verification/SKILL.md",
"chars": 1809,
"preview": "---\nname: code-change-verification\ndescription: Run the mandatory verification stack when changes affect runtime code, t"
},
{
"path": ".agents/skills/code-change-verification/agents/openai.yaml",
"chars": 239,
"preview": "interface:\n display_name: \"Code Change Verification\"\n short_description: \"Run the required local verification stack\"\n "
},
{
"path": ".agents/skills/code-change-verification/scripts/run.ps1",
"chars": 883,
"preview": "Set-StrictMode -Version Latest\n$ErrorActionPreference = \"Stop\"\n\n$scriptDir = Split-Path -Parent $MyInvocation.MyCommand."
},
{
"path": ".agents/skills/code-change-verification/scripts/run.sh",
"chars": 588,
"preview": "#!/usr/bin/env bash\n# Fail fast on any error or undefined variable.\nset -euo pipefail\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BA"
},
{
"path": ".agents/skills/docs-sync/SKILL.md",
"chars": 4338,
"preview": "---\nname: docs-sync\ndescription: Analyze main branch implementation and configuration to find missing, incorrect, or out"
},
{
"path": ".agents/skills/docs-sync/agents/openai.yaml",
"chars": 232,
"preview": "interface:\n display_name: \"Docs Sync\"\n short_description: \"Audit docs coverage and propose targeted updates\"\n default"
},
{
"path": ".agents/skills/docs-sync/references/doc-coverage-checklist.md",
"chars": 2528,
"preview": "# Doc Coverage Checklist\n\nUse this checklist to scan the selected scope (main = comprehensive, or current-branch diff) a"
},
{
"path": ".agents/skills/examples-auto-run/SKILL.md",
"chars": 3190,
"preview": "---\nname: examples-auto-run\ndescription: Run python examples in auto mode with logging, rerun helpers, and background co"
},
{
"path": ".agents/skills/examples-auto-run/agents/openai.yaml",
"chars": 248,
"preview": "interface:\n display_name: \"Examples Auto Run\"\n short_description: \"Run examples in auto mode with logs and rerun helpe"
},
{
"path": ".agents/skills/examples-auto-run/scripts/run.sh",
"chars": 6304,
"preview": "#!/usr/bin/env bash\nset -euo pipefail\n\nROOT=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")/../../../..\" && pwd)\"\nPID_FILE=\"$ROOT/"
},
{
"path": ".agents/skills/final-release-review/SKILL.md",
"chars": 7951,
"preview": "---\nname: final-release-review\ndescription: Perform a release-readiness review by locating the previous release tag from"
},
{
"path": ".agents/skills/final-release-review/agents/openai.yaml",
"chars": 269,
"preview": "interface:\n display_name: \"Final Release Review\"\n short_description: \"Audit a release candidate against the previous t"
},
{
"path": ".agents/skills/final-release-review/references/review-checklist.md",
"chars": 4058,
"preview": "# Release Diff Review Checklist\n\n## Quick commands\n\n- Sync tags: `git fetch origin --tags --prune`.\n- Identify latest re"
},
{
"path": ".agents/skills/final-release-review/scripts/find_latest_release_tag.sh",
"chars": 422,
"preview": "#!/usr/bin/env bash\nset -euo pipefail\n\nremote=\"${1:-origin}\"\npattern=\"${2:-v*}\"\n\n# Sync tags from the remote to ensure t"
},
{
"path": ".agents/skills/implementation-strategy/SKILL.md",
"chars": 4469,
"preview": "---\nname: implementation-strategy\ndescription: Decide how to implement runtime and API changes in openai-agents-python b"
},
{
"path": ".agents/skills/implementation-strategy/agents/openai.yaml",
"chars": 268,
"preview": "interface:\n display_name: \"Implementation Strategy\"\n short_description: \"Choose a compatibility-aware implementation p"
},
{
"path": ".agents/skills/openai-knowledge/SKILL.md",
"chars": 1880,
"preview": "---\nname: openai-knowledge\ndescription: Use when working with the OpenAI API (Responses API) or OpenAI platform features"
},
{
"path": ".agents/skills/openai-knowledge/agents/openai.yaml",
"chars": 233,
"preview": "interface:\n display_name: \"OpenAI Knowledge\"\n short_description: \"Pull authoritative OpenAI platform documentation\"\n "
},
{
"path": ".agents/skills/pr-draft-summary/SKILL.md",
"chars": 5382,
"preview": "---\nname: pr-draft-summary\ndescription: Create a PR title and draft description after substantive code changes are finis"
},
{
"path": ".agents/skills/pr-draft-summary/agents/openai.yaml",
"chars": 251,
"preview": "interface:\n display_name: \"PR Draft Summary\"\n short_description: \"Draft the repo-ready PR title and description\"\n def"
},
{
"path": ".agents/skills/test-coverage-improver/SKILL.md",
"chars": 2711,
"preview": "---\nname: test-coverage-improver\ndescription: 'Improve test coverage in the OpenAI Agents Python repository: run `make c"
},
{
"path": ".agents/skills/test-coverage-improver/agents/openai.yaml",
"chars": 264,
"preview": "interface:\n display_name: \"Test Coverage Improver\"\n short_description: \"Analyze coverage gaps and propose high-impact "
},
{
"path": ".github/ISSUE_TEMPLATE/bug_report.md",
"chars": 643,
"preview": "---\nname: Bug report\nabout: Report a bug\ntitle: ''\nlabels: bug\nassignees: ''\n\n---\n\n### Please read this first\n\n- **Have "
},
{
"path": ".github/ISSUE_TEMPLATE/feature_request.md",
"chars": 453,
"preview": "---\nname: Feature request\nabout: Suggest an idea for this project\ntitle: ''\nlabels: enhancement\nassignees: ''\n\n---\n\n### "
},
{
"path": ".github/ISSUE_TEMPLATE/model_provider.md",
"chars": 806,
"preview": "---\nname: Custom model providers\nabout: Questions or bugs about using non-OpenAI models\ntitle: ''\nlabels: bug\nassignees:"
},
{
"path": ".github/ISSUE_TEMPLATE/question.md",
"chars": 369,
"preview": "---\nname: Question\nabout: Questions about the SDK\ntitle: ''\nlabels: question\nassignees: ''\n\n---\n\n### Please read this fi"
},
{
"path": ".github/PULL_REQUEST_TEMPLATE/pull_request_template.md",
"chars": 392,
"preview": "### Summary\n\n<!-- Please give a short summary of the change and the problem this solves. -->\n\n### Test plan\n\n<!-- Please"
},
{
"path": ".github/codex/prompts/pr-labels.md",
"chars": 5449,
"preview": "# PR auto-labeling\n\nYou are Codex running in CI to propose labels for a pull request in the openai-agents-python reposit"
},
{
"path": ".github/codex/prompts/release-review.md",
"chars": 1054,
"preview": "# Release readiness review\n\nYou are Codex running in CI. Produce a release readiness report for this repository.\n\nSteps:"
},
{
"path": ".github/codex/schemas/pr-labels.json",
"chars": 579,
"preview": "{\n \"type\": \"object\",\n \"additionalProperties\": false,\n \"required\": [\"labels\"],\n \"properties\": {\n \"labels\": {\n "
},
{
"path": ".github/dependabot.yml",
"chars": 186,
"preview": "version: 2\nupdates:\n - package-ecosystem: \"github-actions\"\n directory: \"/\"\n schedule:\n interval: \"monthly\"\n "
},
{
"path": ".github/scripts/detect-changes.sh",
"chars": 1457,
"preview": "#!/usr/bin/env bash\nset -euo pipefail\n\nmode=\"${1:-code}\"\nbase_sha=\"${2:-${BASE_SHA:-}}\"\nhead_sha=\"${3:-${HEAD_SHA:-}}\"\n\n"
},
{
"path": ".github/scripts/pr_labels.py",
"chars": 13520,
"preview": "#!/usr/bin/env python3\nfrom __future__ import annotations\n\nimport argparse\nimport json\nimport os\nimport pathlib\nimport s"
},
{
"path": ".github/scripts/run-asyncio-teardown-stability.sh",
"chars": 424,
"preview": "#!/usr/bin/env bash\nset -euo pipefail\n\nrepeat_count=\"${1:-5}\"\n\nasyncio_progress_args=(\n tests/test_asyncio_progress.py\n"
},
{
"path": ".github/scripts/select-release-milestone.py",
"chars": 5927,
"preview": "#!/usr/bin/env python3\nfrom __future__ import annotations\n\nimport argparse\nimport json\nimport os\nimport re\nimport subpro"
},
{
"path": ".github/workflows/docs.yml",
"chars": 1397,
"preview": "name: Deploy docs\n\non:\n push:\n branches:\n - main\n paths:\n - \"docs/**\"\n - \"mkdocs.yml\"\n\npermissions"
},
{
"path": ".github/workflows/issues.yml",
"chars": 1135,
"preview": "name: Close inactive issues\non:\n schedule:\n - cron: \"30 1 * * *\"\n\njobs:\n close-issues:\n runs-on: ubuntu-latest\n "
},
{
"path": ".github/workflows/pr-labels.yml",
"chars": 7730,
"preview": "name: Auto label PRs\n\non:\n pull_request_target:\n types:\n - opened\n - reopened\n - synchronize\n - "
},
{
"path": ".github/workflows/publish.yml",
"chars": 828,
"preview": "name: Publish to PyPI\n\non:\n release:\n types:\n - published\n\npermissions:\n contents: read\n\njobs:\n publish:\n "
},
{
"path": ".github/workflows/release-pr-update.yml",
"chars": 4427,
"preview": "name: Update release PR on main updates\n\non:\n push:\n branches:\n - main\n\nconcurrency:\n group: release-pr-update"
},
{
"path": ".github/workflows/release-pr.yml",
"chars": 6157,
"preview": "name: Create release PR\n\non:\n workflow_dispatch:\n inputs:\n version:\n description: \"Version to release (e"
},
{
"path": ".github/workflows/release-tag.yml",
"chars": 2654,
"preview": "name: Tag release on merge\n\non:\n pull_request:\n types:\n - closed\n branches:\n - main\n\npermissions:\n con"
},
{
"path": ".github/workflows/tests.yml",
"chars": 4499,
"preview": "name: Tests\n\non:\n push:\n branches:\n - main\n pull_request:\n # All PRs, including stacked PRs\n\npermissions:\n "
},
{
"path": ".github/workflows/update-docs.yml",
"chars": 3201,
"preview": "name: \"Update Translated Docs\"\n\n# This GitHub Actions job automates the process of updating all translated document page"
},
{
"path": ".gitignore",
"chars": 1586,
"preview": "# macOS Files\n.DS_Store\n\n# Byte-compiled / optimized / DLL files\n__pycache__/\n**/__pycache__/\n*.py[cod]\n*$py.class\n\n# C "
},
{
"path": ".prettierrc",
"chars": 166,
"preview": "{\n \"tabWidth\": 4,\n \"overrides\": [\n {\n \"files\": \"*.yml\",\n \"options\": {\n "
},
{
"path": ".vscode/launch.json",
"chars": 427,
"preview": "{\n // Use IntelliSense to learn about possible attributes.\n // Hover to view descriptions of existing attributes.\n"
},
{
"path": ".vscode/settings.json",
"chars": 147,
"preview": "{\n \"python.testing.pytestArgs\": [\n \"tests\"\n ],\n \"python.testing.unittestEnabled\": false,\n \"python.tes"
},
{
"path": "AGENTS.md",
"chars": 12094,
"preview": "# Contributor Guide\n\nThis guide helps new contributors get started with the OpenAI Agents Python repository. It covers r"
},
{
"path": "CLAUDE.md",
"chars": 41,
"preview": "Read the AGENTS.md file for instructions."
},
{
"path": "LICENSE",
"chars": 1063,
"preview": "MIT License\n\nCopyright (c) 2025 OpenAI\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof "
},
{
"path": "Makefile",
"chars": 1790,
"preview": ".PHONY: sync\nsync:\n\tuv sync --all-extras --all-packages --group dev\n\n.PHONY: format\nformat: \n\tuv run ruff format\n\tuv run"
},
{
"path": "PLANS.md",
"chars": 5467,
"preview": "# Codex Execution Plans (ExecPlans)\n\nThis file defines how to write and maintain an ExecPlan: a self-contained, living s"
},
{
"path": "README.md",
"chars": 4413,
"preview": "# OpenAI Agents SDK [](https://pypi.org/project"
},
{
"path": "docs/agents.md",
"chars": 15930,
"preview": "# Agents\n\nAgents are the core building block in your apps. An agent is a large language model (LLM) configured with inst"
},
{
"path": "docs/config.md",
"chars": 5256,
"preview": "# Configuration\n\nThis page covers SDK-wide defaults that you usually set once during application startup, such as the de"
},
{
"path": "docs/context.md",
"chars": 8128,
"preview": "# Context management\n\nContext is an overloaded term. There are two main classes of context you might care about:\n\n1. Con"
},
{
"path": "docs/examples.md",
"chars": 5415,
"preview": "# Examples\n\nCheck out a variety of sample implementations of the SDK in the examples section of the [repo](https://githu"
},
{
"path": "docs/guardrails.md",
"chars": 10407,
"preview": "# Guardrails\n\nGuardrails enable you to do checks and validations of user input and agent output. For example, imagine yo"
},
{
"path": "docs/handoffs.md",
"chars": 10584,
"preview": "# Handoffs\n\nHandoffs allow an agent to delegate tasks to another agent. This is particularly useful in scenarios where d"
},
{
"path": "docs/human_in_the_loop.md",
"chars": 14536,
"preview": "# Human-in-the-loop\n\nUse the human-in-the-loop (HITL) flow to pause agent execution until a person approves or rejects s"
},
{
"path": "docs/index.md",
"chars": 4402,
"preview": "# OpenAI Agents SDK\n\nThe [OpenAI Agents SDK](https://github.com/openai/openai-agents-python) enables you to build agenti"
},
{
"path": "docs/ja/agents.md",
"chars": 13104,
"preview": "---\nsearch:\n exclude: true\n---\n# エージェント\n\nエージェントは、アプリにおける中核的な基本コンポーネントです。エージェントは、大規模言語モデル ( LLM ) に instructions、ツール、さらに"
},
{
"path": "docs/ja/config.md",
"chars": 4065,
"preview": "---\nsearch:\n exclude: true\n---\n# 設定\n\nこのページでは、デフォルトの OpenAI キーやクライアント、デフォルトの OpenAI API 形式、トレーシングのエクスポート既定値、ロギングの動作など、通常"
},
{
"path": "docs/ja/context.md",
"chars": 6014,
"preview": "---\nsearch:\n exclude: true\n---\n# コンテキスト管理\n\nコンテキストは多義的な用語です。主に重要になるコンテキストは 2 つあります。\n\n1. コード内でローカルに利用可能なコンテキスト: これは、ツール関数"
},
{
"path": "docs/ja/examples.md",
"chars": 4336,
"preview": "---\nsearch:\n exclude: true\n---\n# コード例\n\n[repo](https://github.com/openai/openai-agents-python/tree/main/examples) の exam"
},
{
"path": "docs/ja/guardrails.md",
"chars": 8022,
"preview": "---\nsearch:\n exclude: true\n---\n# ガードレール\n\nガードレールを使うと、ユーザー入力とエージェント出力のチェックや検証を行えます。たとえば、顧客リクエスト対応のために非常に高性能(したがって低速 / 高コス"
},
{
"path": "docs/ja/handoffs.md",
"chars": 7594,
"preview": "---\nsearch:\n exclude: true\n---\n# ハンドオフ\n\nハンドオフを使うと、あるエージェントが別のエージェントにタスクを委譲できます。これは、異なるエージェントがそれぞれ異なる領域を専門にしているシナリオで特に有用"
},
{
"path": "docs/ja/human_in_the_loop.md",
"chars": 11081,
"preview": "---\nsearch:\n exclude: true\n---\n# Human-in-the-loop\n\nhuman-in-the-loop ( HITL ) フローを使用すると、機密性の高いツール呼び出しを人が承認または拒否するまで、エー"
},
{
"path": "docs/ja/index.md",
"chars": 3073,
"preview": "---\nsearch:\n exclude: true\n---\n# OpenAI Agents SDK\n\n[OpenAI Agents SDK](https://github.com/openai/openai-agents-python)"
},
{
"path": "docs/ja/mcp.md",
"chars": 15300,
"preview": "---\nsearch:\n exclude: true\n---\n# Model context protocol (MCP)\n\n[Model context protocol](https://modelcontextprotocol.io"
},
{
"path": "docs/ja/models/index.md",
"chars": 20735,
"preview": "---\nsearch:\n exclude: true\n---\n# モデル\n\nAgents SDK には、OpenAI モデルをすぐに使える形で 2 つの方式でサポートしています。\n\n- **推奨**: [`OpenAIResponse"
},
{
"path": "docs/ja/models/litellm.md",
"chars": 198,
"preview": "---\nsearch:\n exclude: true\n---\n# LiteLLM\n\n<script>\n window.location.replace(\"../#litellm\");\n</script>\n\nこのページは [Models "
},
{
"path": "docs/ja/multi_agent.md",
"chars": 2895,
"preview": "---\nsearch:\n exclude: true\n---\n# エージェントオーケストレーション\n\nオーケストレーションとは、アプリ内でのエージェントの流れを指します。どのエージェントが、どの順序で実行され、次に何が起こるかをどのように"
},
{
"path": "docs/ja/quickstart.md",
"chars": 4810,
"preview": "---\nsearch:\n exclude: true\n---\n# クイックスタート\n\n## プロジェクトと仮想環境の作成\n\nこれを行うのは 1 回だけで十分です。\n\n```bash\nmkdir my_project\ncd my_proje"
},
{
"path": "docs/ja/realtime/guide.md",
"chars": 10958,
"preview": "---\nsearch:\n exclude: true\n---\n# Realtime エージェントガイド\n\nこのガイドでは、 OpenAI Agents SDK の realtime レイヤーが OpenAI Realtime API にど"
},
{
"path": "docs/ja/realtime/quickstart.md",
"chars": 4914,
"preview": "---\nsearch:\n exclude: true\n---\n# クイックスタート\n\nPython SDK の Realtime エージェントは、WebSocket トランスポート経由の OpenAI Realtime API 上に構築さ"
},
{
"path": "docs/ja/realtime/transport.md",
"chars": 3482,
"preview": "---\nsearch:\n exclude: true\n---\n# Realtime トランスポート\n\nこのページは、realtime エージェントを Python アプリケーションにどのように組み込むかを判断するために使用します。\n\n!!"
},
{
"path": "docs/ja/release.md",
"chars": 3607,
"preview": "---\nsearch:\n exclude: true\n---\n# リリースプロセス / 変更履歴\n\nこのプロジェクトは、`0.Y.Z` 形式を使ったセマンティックバージョニングの少し修正版に従います。先頭の `0` は、SDK が依然とし"
},
{
"path": "docs/ja/repl.md",
"chars": 678,
"preview": "---\nsearch:\n exclude: true\n---\n# REPL ユーティリティ\n\nこの SDK は、ターミナル上でエージェントの挙動を素早く対話的にテストできる `run_demo_loop` を提供します。\n\n\n```pyt"
},
{
"path": "docs/ja/results.md",
"chars": 8983,
"preview": "---\nsearch:\n exclude: true\n---\n# 実行結果\n\n`Runner.run` メソッドを呼び出すと、次の 2 種類の結果タイプのいずれかを受け取ります。\n\n- `Runner.run(...)` または `R"
},
{
"path": "docs/ja/running_agents.md",
"chars": 20252,
"preview": "---\nsearch:\n exclude: true\n---\n# エージェントの実行\n\n[`Runner`][agents.run.Runner] クラスを介してエージェントを実行できます。方法は 3 つあります。\n\n1. [`Runne"
},
{
"path": "docs/ja/sessions/advanced_sqlite_session.md",
"chars": 8172,
"preview": "---\nsearch:\n exclude: true\n---\n# 高度な SQLite セッション\n\n`AdvancedSQLiteSession` は、基本的な `SQLiteSession` の拡張版であり、会話の分岐、詳細な使用状況"
},
{
"path": "docs/ja/sessions/encrypted_session.md",
"chars": 3922,
"preview": "---\nsearch:\n exclude: true\n---\n# 暗号化セッション\n\n`EncryptedSession` は、あらゆるセッション実装に対して透過的な暗号化を提供し、古い項目の自動有効期限切れによって会話データを保護します"
},
{
"path": "docs/ja/sessions/index.md",
"chars": 20342,
"preview": "---\nsearch:\n exclude: true\n---\n# セッション\n\nAgents SDK は、複数のエージェント実行にまたがって会話履歴を自動的に維持する組み込みのセッションメモリを提供しており、ターン間で `.to_inpu"
},
{
"path": "docs/ja/sessions/sqlalchemy_session.md",
"chars": 1734,
"preview": "---\nsearch:\n exclude: true\n---\n# SQLAlchemy セッション\n\n`SQLAlchemySession` は SQLAlchemy を使用して本番運用対応のセッション実装を提供し、セッションストレージに"
},
{
"path": "docs/ja/sessions.md",
"chars": 12180,
"preview": "---\nsearch:\n exclude: true\n---\n# セッション\n\nAgents SDK は、複数のエージェント実行にわたって会話履歴を自動で維持する組み込みのセッションメモリを提供し、ターン間で手動で `.to_input_"
},
{
"path": "docs/ja/streaming.md",
"chars": 5872,
"preview": "---\nsearch:\n exclude: true\n---\n# ストリーミング\n\nストリーミングを使うと、エージェントの実行が進行する間の更新を購読できます。これは、エンドユーザーに進捗更新や部分的な応答を表示するのに役立ちます。\n\nス"
},
{
"path": "docs/ja/tools.md",
"chars": 30939,
"preview": "---\nsearch:\n exclude: true\n---\n# ツール\n\nツールを使うと、エージェントはアクションを実行できます。たとえば、データ取得、コード実行、外部 API 呼び出し、さらにはコンピュータ操作などです。 SDK は "
},
{
"path": "docs/ja/tracing.md",
"chars": 8011,
"preview": "---\nsearch:\n exclude: true\n---\n# トレーシング\n\nAgents SDK には組み込みのトレーシングが含まれており、エージェント実行中のイベント( LLM 生成、ツール呼び出し、ハンドオフ、ガードレール、さら"
},
{
"path": "docs/ja/usage.md",
"chars": 3186,
"preview": "---\nsearch:\n exclude: true\n---\n# 使用方法\n\nAgents SDK は、実行ごとのトークン使用量を自動的に追跡します。実行コンテキストからアクセスでき、コスト監視、制限の適用、分析記録に利用できます。\n\n#"
},
{
"path": "docs/ja/visualization.md",
"chars": 2384,
"preview": "---\nsearch:\n exclude: true\n---\n# エージェント可視化\n\nエージェント可視化では、 **Graphviz** を使用して、エージェントとその関係を構造化されたグラフィカル表現として生成できます。これは、アプリ"
},
{
"path": "docs/ja/voice/pipeline.md",
"chars": 2940,
"preview": "---\nsearch:\n exclude: true\n---\n# パイプラインとワークフロー\n\n[`VoicePipeline`][agents.voice.pipeline.VoicePipeline] は、エージェントのワークフローを"
},
{
"path": "docs/ja/voice/quickstart.md",
"chars": 4909,
"preview": "---\nsearch:\n exclude: true\n---\n# クイックスタート\n\n## 前提条件\n\nAgents SDK の基本的な [クイックスタート手順](../quickstart.md) に従い、仮想環境をセットアップしている"
},
{
"path": "docs/ja/voice/tracing.md",
"chars": 1113,
"preview": "---\nsearch:\n exclude: true\n---\n# トレーシング\n\n[エージェントがトレーシングされる](../tracing.md)のと同様に、音声パイプラインも自動的にトレーシングされます。\n\n基本的なトレーシング情報に"
},
{
"path": "docs/ko/agents.md",
"chars": 12701,
"preview": "---\nsearch:\n exclude: true\n---\n# 에이전트\n\n에이전트는 앱의 핵심 구성 요소입니다. 에이전트는 instructions, tools, 그리고 핸드오프, 가드레일, structured outp"
},
{
"path": "docs/ko/config.md",
"chars": 4025,
"preview": "---\nsearch:\n exclude: true\n---\n# 구성\n\n이 페이지에서는 기본 OpenAI 키 또는 client, 기본 OpenAI API 형태, 트레이싱 내보내기 기본값, 로깅 동작 등 애플리케이션 시작"
},
{
"path": "docs/ko/context.md",
"chars": 6041,
"preview": "---\nsearch:\n exclude: true\n---\n# 컨텍스트 관리\n\n컨텍스트는 여러 의미로 사용되는 용어입니다. 주로 고려할 수 있는 컨텍스트는 두 가지 주요 범주가 있습니다\n\n1. 코드에서 로컬로 사용할 "
},
{
"path": "docs/ko/examples.md",
"chars": 4201,
"preview": "---\nsearch:\n exclude: true\n---\n# 예제\n\n[repo](https://github.com/openai/openai-agents-python/tree/main/examples)의 example"
},
{
"path": "docs/ko/guardrails.md",
"chars": 7929,
"preview": "---\nsearch:\n exclude: true\n---\n# 가드레일\n\n가드레일을 사용하면 사용자 입력과 에이전트 출력에 대한 검사 및 검증을 수행할 수 있습니다. 예를 들어, 고객 요청을 돕기 위해 매우 똑똑한(따"
},
{
"path": "docs/ko/handoffs.md",
"chars": 7556,
"preview": "---\nsearch:\n exclude: true\n---\n# 핸드오프\n\n핸드오프를 사용하면 한 에이전트가 다른 에이전트에 작업을 위임할 수 있습니다. 이는 서로 다른 에이전트가 각기 다른 영역을 전문으로 하는 시나리"
},
{
"path": "docs/ko/human_in_the_loop.md",
"chars": 10997,
"preview": "---\nsearch:\n exclude: true\n---\n# 휴먼인더루프 (HITL)\n\n휴먼인더루프 (HITL) 흐름을 사용해 민감한 도구 호출을 사람이 승인하거나 거절할 때까지 에이전트 실행을 일시 중지할 수 있습"
},
{
"path": "docs/ko/index.md",
"chars": 2953,
"preview": "---\nsearch:\n exclude: true\n---\n# OpenAI Agents SDK\n\n[OpenAI Agents SDK](https://github.com/openai/openai-agents-python)"
},
{
"path": "docs/ko/mcp.md",
"chars": 15179,
"preview": "---\nsearch:\n exclude: true\n---\n# Model context protocol (MCP)\n\n[Model context protocol](https://modelcontextprotocol.io"
},
{
"path": "docs/ko/models/index.md",
"chars": 20730,
"preview": "---\nsearch:\n exclude: true\n---\n# 모델\n\nAgents SDK 는 OpenAI 모델을 즉시 사용할 수 있도록 두 가지 방식으로 지원합니다:\n\n- **권장**: 새 [Responses AP"
},
{
"path": "docs/ko/models/litellm.md",
"chars": 191,
"preview": "---\nsearch:\n exclude: true\n---\n# LiteLLM\n\n<script>\n window.location.replace(\"../#litellm\");\n</script>\n\n이 페이지는 [Models의"
},
{
"path": "docs/ko/multi_agent.md",
"chars": 3024,
"preview": "---\nsearch:\n exclude: true\n---\n# 에이전트 오케스트레이션\n\n오케스트레이션은 앱에서 에이전트의 흐름을 의미합니다. 어떤 에이전트가 실행되고, 어떤 순서로 실행되며, 다음에 무엇이 일어날지를 "
},
{
"path": "docs/ko/quickstart.md",
"chars": 4699,
"preview": "---\nsearch:\n exclude: true\n---\n# 빠른 시작\n\n## 프로젝트 및 가상 환경 생성\n\n이 작업은 한 번만 하면 됩니다\n\n```bash\nmkdir my_project\ncd my_project\np"
},
{
"path": "docs/ko/realtime/guide.md",
"chars": 10752,
"preview": "---\nsearch:\n exclude: true\n---\n# 실시간 에이전트 가이드\n\n이 가이드는 OpenAI Agents SDK의 실시간 레이어가 OpenAI Realtime API에 어떻게 매핑되는지, 그리고 P"
},
{
"path": "docs/ko/realtime/quickstart.md",
"chars": 4641,
"preview": "---\nsearch:\n exclude: true\n---\n# 빠른 시작\n\nPython SDK 의 실시간 에이전트는 WebSocket 전송을 통해 OpenAI Realtime API 위에서 구축된 서버 측 저지연 에이"
},
{
"path": "docs/ko/realtime/transport.md",
"chars": 3276,
"preview": "---\nsearch:\n exclude: true\n---\n# 실시간 전송\n\n이 페이지를 사용해 실시간 에이전트가 Python 애플리케이션에 어떻게 맞는지 결정하세요\n\n!!! note \"Python SDK 경계\"\n\n "
},
{
"path": "docs/ko/release.md",
"chars": 3437,
"preview": "---\nsearch:\n exclude: true\n---\n# 릴리스 프로세스/변경 로그\n\n이 프로젝트는 `0.Y.Z` 형식을 사용하는 시맨틱 버저닝의 약간 수정된 버전을 따릅니다. 앞의 `0`은 SDK가 여전히 빠르"
},
{
"path": "docs/ko/repl.md",
"chars": 677,
"preview": "---\nsearch:\n exclude: true\n---\n# REPL 유틸리티\n\nSDK는 터미널에서 에이전트의 동작을 빠르고 대화형으로 테스트할 수 있도록 `run_demo_loop`를 제공합니다.\n\n```pytho"
},
{
"path": "docs/ko/results.md",
"chars": 8909,
"preview": "---\nsearch:\n exclude: true\n---\n# 결과\n\n`Runner.run` 메서드를 호출하면 두 가지 결과 타입 중 하나를 받습니다:\n\n- `Runner.run(...)` 또는 `Runner.ru"
},
{
"path": "docs/ko/running_agents.md",
"chars": 20095,
"preview": "---\nsearch:\n exclude: true\n---\n# 에이전트 실행\n\n[`Runner`][agents.run.Runner] 클래스를 통해 에이전트를 실행할 수 있습니다. 3가지 옵션이 있습니다:\n\n1. [`R"
},
{
"path": "docs/ko/sessions/advanced_sqlite_session.md",
"chars": 8147,
"preview": "---\nsearch:\n exclude: true\n---\n# 고급 SQLite 세션\n\n`AdvancedSQLiteSession`은 기본 `SQLiteSession`의 향상된 버전으로, 대화 브랜칭, 상세 사용량 분석"
},
{
"path": "docs/ko/sessions/encrypted_session.md",
"chars": 3879,
"preview": "---\nsearch:\n exclude: true\n---\n# 암호화된 세션\n\n`EncryptedSession`은 모든 세션 구현에 대해 투명한 암호화를 제공하며, 오래된 항목의 자동 만료로 대화 데이터를 안전하게 보"
},
{
"path": "docs/ko/sessions/index.md",
"chars": 20107,
"preview": "---\nsearch:\n exclude: true\n---\n# 세션\n\nAgents SDK 는 여러 에이전트 실행에 걸쳐 대화 기록을 자동으로 유지하는 내장 세션 메모리를 제공하여, 턴 사이에서 `.to_input_li"
},
{
"path": "docs/ko/sessions/sqlalchemy_session.md",
"chars": 1707,
"preview": "---\nsearch:\n exclude: true\n---\n# SQLAlchemy 세션\n\n`SQLAlchemySession`은 SQLAlchemy를 사용하여 프로덕션 준비가 된 세션 구현을 제공하며, 세션 저장소에 S"
},
{
"path": "docs/ko/sessions.md",
"chars": 12200,
"preview": "---\nsearch:\n exclude: true\n---\n# 세션\n\nAgents SDK는 여러 에이전트 실행(run) 간 대화 기록을 자동으로 유지하는 내장 세션 메모리를 제공합니다. 이를 통해 턴 사이에 `.to_"
},
{
"path": "docs/ko/streaming.md",
"chars": 5850,
"preview": "---\nsearch:\n exclude: true\n---\n# 스트리밍\n\n스트리밍을 사용하면 에이전트 실행이 진행되는 동안 업데이트를 구독할 수 있습니다. 이는 최종 사용자에게 진행 상황 업데이트와 부분 응답을 보여주"
},
{
"path": "docs/ko/tools.md",
"chars": 30676,
"preview": "---\nsearch:\n exclude: true\n---\n# 도구\n\n도구를 사용하면 에이전트가 데이터 가져오기, 코드 실행, 외부 API 호출, 심지어 컴퓨터 사용과 같은 작업을 수행할 수 있습니다. SDK는 다섯 "
},
{
"path": "docs/ko/tracing.md",
"chars": 8144,
"preview": "---\nsearch:\n exclude: true\n---\n# 트레이싱\n\nAgents SDK에는 기본 제공 트레이싱이 포함되어 있어 에이전트 실행 중 발생하는 이벤트의 포괄적인 기록을 수집합니다: LLM 생성, 도구 "
},
{
"path": "docs/ko/usage.md",
"chars": 3180,
"preview": "---\nsearch:\n exclude: true\n---\n# 사용법\n\nAgents SDK는 모든 실행의 토큰 사용량을 자동으로 추적합니다. 실행 컨텍스트에서 이를 확인하고 비용 모니터링, 한도 적용, 분석 기록에 활"
},
{
"path": "docs/ko/visualization.md",
"chars": 2354,
"preview": "---\nsearch:\n exclude: true\n---\n# 에이전트 시각화\n\n에이전트 시각화를 사용하면 **Graphviz**를 통해 에이전트와 그 관계를 구조화된 그래픽 표현으로 생성할 수 있습니다. 이는 애플리"
},
{
"path": "docs/ko/voice/pipeline.md",
"chars": 2998,
"preview": "---\nsearch:\n exclude: true\n---\n# 파이프라인과 워크플로\n\n[`VoicePipeline`][agents.voice.pipeline.VoicePipeline]은 에이전트 워크플로를 음성 앱으로"
},
{
"path": "docs/ko/voice/quickstart.md",
"chars": 4907,
"preview": "---\nsearch:\n exclude: true\n---\n# 빠른 시작\n\n## 사전 요구사항\n\nAgents SDK의 기본 [빠른 시작 안내](../quickstart.md)를 따랐는지 확인하고 가상 환경을 설정하세요"
},
{
"path": "docs/ko/voice/tracing.md",
"chars": 1127,
"preview": "---\nsearch:\n exclude: true\n---\n# 트레이싱\n\n[에이전트가 트레이싱되는](../tracing.md) 방식과 마찬가지로, 음성 파이프라인도 자동으로 트레이싱됩니다.\n\n기본적인 트레이싱 정보는 "
},
{
"path": "docs/llms-full.txt",
"chars": 15038,
"preview": "# OpenAI Agents SDK Documentation (Full Context)\n\n> Extended reference map for the OpenAI Agents SDK documentation site."
},
{
"path": "docs/llms.txt",
"chars": 6849,
"preview": "# OpenAI Agents SDK Documentation\n\n> Official documentation for building production-ready agentic applications with the "
},
{
"path": "docs/mcp.md",
"chars": 18823,
"preview": "# Model context protocol (MCP)\n\nThe [Model context protocol](https://modelcontextprotocol.io/introduction) (MCP) standar"
},
{
"path": "docs/models/index.md",
"chars": 26893,
"preview": "# Models\n\nThe Agents SDK comes with out-of-the-box support for OpenAI models in two flavors:\n\n- **Recommended**: the ["
},
{
"path": "docs/models/litellm.md",
"chars": 205,
"preview": "# LiteLLM\n\n<script>\n window.location.replace(\"../#litellm\");\n</script>\n\nThis page moved to the [LiteLLM section in Mode"
},
{
"path": "docs/multi_agent.md",
"chars": 4901,
"preview": "# Agent orchestration\n\nOrchestration refers to the flow of agents in your app. Which agents run, in what order, and how "
},
{
"path": "docs/quickstart.md",
"chars": 5682,
"preview": "# Quickstart\n\n## Create a project and virtual environment\n\nYou'll only need to do this once.\n\n```bash\nmkdir my_project\nc"
},
{
"path": "docs/realtime/guide.md",
"chars": 13559,
"preview": "# Realtime agents guide\n\nThis guide explains how the OpenAI Agents SDK's realtime layer maps onto the OpenAI Realtime AP"
},
{
"path": "docs/realtime/quickstart.md",
"chars": 5636,
"preview": "# Quickstart\n\nRealtime agents in the Python SDK are server-side, low-latency agents built on the OpenAI Realtime API ove"
},
{
"path": "docs/realtime/transport.md",
"chars": 4463,
"preview": "# Realtime transport\n\nUse this page to decide how realtime agents fit into your Python application.\n\n!!! note \"Python SD"
},
{
"path": "docs/ref/agent.md",
"chars": 29,
"preview": "# `Agents`\n\n::: agents.agent\n"
},
{
"path": "docs/ref/agent_output.md",
"chars": 42,
"preview": "# `Agent output`\n\n::: agents.agent_output\n"
},
{
"path": "docs/ref/agent_tool_input.md",
"chars": 50,
"preview": "# `Agent Tool Input`\n\n::: agents.agent_tool_input\n"
},
{
"path": "docs/ref/agent_tool_state.md",
"chars": 50,
"preview": "# `Agent Tool State`\n\n::: agents.agent_tool_state\n"
},
{
"path": "docs/ref/apply_diff.md",
"chars": 38,
"preview": "# `Apply Diff`\n\n::: agents.apply_diff\n"
},
{
"path": "docs/ref/computer.md",
"chars": 34,
"preview": "# `Computer`\n\n::: agents.computer\n"
},
{
"path": "docs/ref/editor.md",
"chars": 30,
"preview": "# `Editor`\n\n::: agents.editor\n"
},
{
"path": "docs/ref/exceptions.md",
"chars": 38,
"preview": "# `Exceptions`\n\n::: agents.exceptions\n"
},
{
"path": "docs/ref/extensions/experimental/codex/codex.md",
"chars": 58,
"preview": "# `Codex`\n\n::: agents.extensions.experimental.codex.codex\n"
},
{
"path": "docs/ref/extensions/experimental/codex/codex_options.md",
"chars": 74,
"preview": "# `Codex Options`\n\n::: agents.extensions.experimental.codex.codex_options\n"
},
{
"path": "docs/ref/extensions/experimental/codex/codex_tool.md",
"chars": 68,
"preview": "# `Codex Tool`\n\n::: agents.extensions.experimental.codex.codex_tool\n"
},
{
"path": "docs/ref/extensions/experimental/codex/events.md",
"chars": 60,
"preview": "# `Events`\n\n::: agents.extensions.experimental.codex.events\n"
},
{
"path": "docs/ref/extensions/experimental/codex/exec.md",
"chars": 56,
"preview": "# `Exec`\n\n::: agents.extensions.experimental.codex.exec\n"
},
{
"path": "docs/ref/extensions/experimental/codex/items.md",
"chars": 58,
"preview": "# `Items`\n\n::: agents.extensions.experimental.codex.items\n"
},
{
"path": "docs/ref/extensions/experimental/codex/output_schema_file.md",
"chars": 84,
"preview": "# `Output Schema File`\n\n::: agents.extensions.experimental.codex.output_schema_file\n"
},
{
"path": "docs/ref/extensions/experimental/codex/payloads.md",
"chars": 64,
"preview": "# `Payloads`\n\n::: agents.extensions.experimental.codex.payloads\n"
},
{
"path": "docs/ref/extensions/experimental/codex/thread.md",
"chars": 60,
"preview": "# `Thread`\n\n::: agents.extensions.experimental.codex.thread\n"
},
{
"path": "docs/ref/extensions/experimental/codex/thread_options.md",
"chars": 76,
"preview": "# `Thread Options`\n\n::: agents.extensions.experimental.codex.thread_options\n"
},
{
"path": "docs/ref/extensions/experimental/codex/turn_options.md",
"chars": 72,
"preview": "# `Turn Options`\n\n::: agents.extensions.experimental.codex.turn_options\n"
},
{
"path": "docs/ref/extensions/handoff_filters.md",
"chars": 59,
"preview": "# `Handoff filters`\n\n::: agents.extensions.handoff_filters\n"
},
{
"path": "docs/ref/extensions/handoff_prompt.md",
"chars": 175,
"preview": "# `Handoff prompt`\n\n::: agents.extensions.handoff_prompt\n\n options:\n members:\n - RECOMMENDED_PROMPT"
},
{
"path": "docs/ref/extensions/litellm.md",
"chars": 63,
"preview": "# `LiteLLM Models`\n\n::: agents.extensions.models.litellm_model\n"
},
{
"path": "docs/ref/extensions/memory/advanced_sqlite_session.md",
"chars": 101,
"preview": "# `AdvancedSQLiteSession`\n\n::: agents.extensions.memory.advanced_sqlite_session.AdvancedSQLiteSession"
},
{
"path": "docs/ref/extensions/memory/async_sqlite_session.md",
"chars": 76,
"preview": "# `Async Sqlite Session`\n\n::: agents.extensions.memory.async_sqlite_session\n"
},
{
"path": "docs/ref/extensions/memory/dapr_session.md",
"chars": 71,
"preview": "# `DaprSession`\n\n::: agents.extensions.memory.dapr_session.DaprSession\n"
},
{
"path": "docs/ref/extensions/memory/encrypt_session.md",
"chars": 84,
"preview": "# `EncryptedSession`\n\n::: agents.extensions.memory.encrypt_session.EncryptedSession\n"
},
{
"path": "docs/ref/extensions/memory/redis_session.md",
"chars": 73,
"preview": "# `RedisSession`\n\n::: agents.extensions.memory.redis_session.RedisSession"
},
{
"path": "docs/ref/extensions/memory/sqlalchemy_session.md",
"chars": 89,
"preview": "# `SQLAlchemySession`\n\n::: agents.extensions.memory.sqlalchemy_session.SQLAlchemySession\n"
},
{
"path": "docs/ref/extensions/models/litellm_model.md",
"chars": 62,
"preview": "# `LiteLLM Model`\n\n::: agents.extensions.models.litellm_model\n"
},
{
"path": "docs/ref/extensions/models/litellm_provider.md",
"chars": 68,
"preview": "# `LiteLLM Provider`\n\n::: agents.extensions.models.litellm_provider\n"
},
{
"path": "docs/ref/extensions/tool_output_trimmer.md",
"chars": 67,
"preview": "# `Tool Output Trimmer`\n\n::: agents.extensions.tool_output_trimmer\n"
},
{
"path": "docs/ref/extensions/visualization.md",
"chars": 55,
"preview": "# `Visualization`\n\n::: agents.extensions.visualization\n"
},
{
"path": "docs/ref/function_schema.md",
"chars": 48,
"preview": "# `Function schema`\n\n::: agents.function_schema\n"
},
{
"path": "docs/ref/guardrail.md",
"chars": 37,
"preview": "# `Guardrails`\n\n::: agents.guardrail\n"
},
{
"path": "docs/ref/handoffs/history.md",
"chars": 41,
"preview": "# `History`\n\n::: agents.handoffs.history\n"
},
{
"path": "docs/ref/handoffs.md",
"chars": 34,
"preview": "# `Handoffs`\n\n::: agents.handoffs\n"
},
{
"path": "docs/ref/index.md",
"chars": 463,
"preview": "# Agents module\n\n::: agents\n\n options:\n members:\n - set_default_openai_key\n - set_defaul"
},
{
"path": "docs/ref/items.md",
"chars": 28,
"preview": "# `Items`\n\n::: agents.items\n"
},
{
"path": "docs/ref/lifecycle.md",
"chars": 77,
"preview": "# `Lifecycle`\n\n::: agents.lifecycle\n\n options:\n show_source: false\n"
},
{
"path": "docs/ref/logger.md",
"chars": 30,
"preview": "# `Logger`\n\n::: agents.logger\n"
},
{
"path": "docs/ref/mcp/manager.md",
"chars": 36,
"preview": "# `Manager`\n\n::: agents.mcp.manager\n"
},
{
"path": "docs/ref/mcp/server.md",
"chars": 39,
"preview": "# `MCP Servers`\n\n::: agents.mcp.server\n"
},
{
"path": "docs/ref/mcp/util.md",
"chars": 34,
"preview": "# `MCP Util`\n\n::: agents.mcp.util\n"
},
{
"path": "docs/ref/memory/openai_conversations_session.md",
"chars": 81,
"preview": "# `Openai Conversations Session`\n\n::: agents.memory.openai_conversations_session\n"
},
{
"path": "docs/ref/memory/openai_responses_compaction_session.md",
"chars": 95,
"preview": "# `Openai Responses Compaction Session`\n\n::: agents.memory.openai_responses_compaction_session\n"
},
{
"path": "docs/ref/memory/session.md",
"chars": 39,
"preview": "# `Session`\n\n::: agents.memory.session\n"
},
{
"path": "docs/ref/memory/session_settings.md",
"chars": 57,
"preview": "# `Session Settings`\n\n::: agents.memory.session_settings\n"
},
{
"path": "docs/ref/memory/sqlite_session.md",
"chars": 53,
"preview": "# `Sqlite Session`\n\n::: agents.memory.sqlite_session\n"
},
{
"path": "docs/ref/memory/util.md",
"chars": 33,
"preview": "# `Util`\n\n::: agents.memory.util\n"
},
{
"path": "docs/ref/memory.md",
"chars": 150,
"preview": "# Memory\n\n::: agents.memory\n\n options:\n members:\n - Session\n - SQLiteSession\n "
},
{
"path": "docs/ref/model_settings.md",
"chars": 46,
"preview": "# `Model settings`\n\n::: agents.model_settings\n"
},
{
"path": "docs/ref/models/chatcmpl_converter.md",
"chars": 61,
"preview": "# `Chatcmpl Converter`\n\n::: agents.models.chatcmpl_converter\n"
},
{
"path": "docs/ref/models/chatcmpl_helpers.md",
"chars": 57,
"preview": "# `Chatcmpl Helpers`\n\n::: agents.models.chatcmpl_helpers\n"
},
{
"path": "docs/ref/models/chatcmpl_stream_handler.md",
"chars": 71,
"preview": "# `Chatcmpl Stream Handler`\n\n::: agents.models.chatcmpl_stream_handler\n"
},
{
"path": "docs/ref/models/default_models.md",
"chars": 53,
"preview": "# `Default Models`\n\n::: agents.models.default_models\n"
},
{
"path": "docs/ref/models/fake_id.md",
"chars": 39,
"preview": "# `Fake Id`\n\n::: agents.models.fake_id\n"
},
{
"path": "docs/ref/models/interface.md",
"chars": 49,
"preview": "# `Model interface`\n\n::: agents.models.interface\n"
},
{
"path": "docs/ref/models/multi_provider.md",
"chars": 53,
"preview": "# `Multi Provider`\n\n::: agents.models.multi_provider\n"
},
{
"path": "docs/ref/models/openai_chatcompletions.md",
"chars": 76,
"preview": "# `OpenAI Chat Completions model`\n\n::: agents.models.openai_chatcompletions\n"
},
{
"path": "docs/ref/models/openai_provider.md",
"chars": 55,
"preview": "# `OpenAI Provider`\n\n::: agents.models.openai_provider\n"
},
{
"path": "docs/ref/models/openai_responses.md",
"chars": 63,
"preview": "# `OpenAI Responses model`\n\n::: agents.models.openai_responses\n"
},
{
"path": "docs/ref/prompts.md",
"chars": 32,
"preview": "# `Prompts`\n\n::: agents.prompts\n"
}
]
// ... and 670 more files (download for full content)
About this extraction
This page contains the full source code of the openai/openai-agents-python GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 870 files (6.4 MB), approximately 1.7M tokens, and a symbol index with 6257 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — a free GitHub repo-to-text converter for AI. Built by Nikandr Surkov.