[
  {
    "path": ".github/actions/uv_setup/action.yml",
    "content": "# TODO: https://docs.astral.sh/uv/guides/integration/github/#caching\n\nname: uv-install\ndescription: Set up Python and uv\n\ninputs:\n  python-version:\n    description: Python version, supporting MAJOR.MINOR only\n    required: true\n\nenv:\n  UV_VERSION: \"0.5.25\"\n\nruns:\n  using: composite\n  steps:\n    - name: Install uv and set the python version\n      uses: astral-sh/setup-uv@v5\n      with:\n        version: ${{ env.UV_VERSION }}\n        python-version: ${{ inputs.python-version }}\n"
  },
  {
    "path": ".github/workflows/_lint.yml",
    "content": "name: lint\n\non:\n  workflow_call:\n    inputs:\n      working-directory:\n        required: true\n        type: string\n        description: \"From which folder this pipeline executes\"\n      python-version:\n        required: true\n        type: string\n        description: \"Python version to use\"\n\nenv:\n  WORKDIR: ${{ inputs.working-directory == '' && '.' || inputs.working-directory }}\n\n  # This env var allows us to get inline annotations when ruff has complaints.\n  RUFF_OUTPUT_FORMAT: github\n\n  UV_FROZEN: \"true\"\n\npermissions:\n  contents: read\n\njobs:\n  build:\n    name: \"make lint #${{ inputs.python-version }}\"\n    runs-on: ubuntu-latest\n    timeout-minutes: 20\n    steps:\n      - uses: actions/checkout@v4\n\n      - name: Set up Python ${{ inputs.python-version }} + uv\n        uses: \"./.github/actions/uv_setup\"\n        with:\n          python-version: ${{ inputs.python-version }}\n\n      - name: Install dependencies\n        working-directory: ${{ inputs.working-directory }}\n        run: |\n          uv sync --group test\n\n      - name: Analysing the code with our lint\n        working-directory: ${{ inputs.working-directory }}\n        run: |\n          make lint\n"
  },
  {
    "path": ".github/workflows/_test.yml",
    "content": "name: test\n\non:\n  workflow_call:\n    inputs:\n      working-directory:\n        required: true\n        type: string\n        description: \"From which folder this pipeline executes\"\n      python-version:\n        required: true\n        type: string\n        description: \"Python version to use\"\n\nenv:\n  UV_FROZEN: \"true\"\n  UV_NO_SYNC: \"true\"\n\npermissions:\n  contents: read\n\njobs:\n  build:\n    defaults:\n      run:\n        working-directory: ${{ inputs.working-directory }}\n    runs-on: ubuntu-latest\n    timeout-minutes: 20\n    name: \"make test #${{ inputs.python-version }}\"\n    steps:\n      - uses: actions/checkout@v4\n\n      - name: Set up Python ${{ inputs.python-version }} + uv\n        uses: \"./.github/actions/uv_setup\"\n        id: setup-python\n        with:\n          python-version: ${{ inputs.python-version }}\n      - name: Install dependencies\n        shell: bash\n        run: uv sync --group test\n\n      - name: Run core tests\n        shell: bash\n        run: |\n          make test\n"
  },
  {
    "path": ".github/workflows/ci.yml",
    "content": "---\nname: Run CI Tests\n\non:\n  push:\n    branches: [ main ]\n  pull_request:\n  workflow_dispatch:  # Allows to trigger the workflow manually in GitHub UI\n\n# If another push to the same PR or branch happens while this workflow is still running,\n# cancel the earlier run in favor of the next run.\n#\n# There's no point in testing an outdated version of the code. GitHub only allows\n# a limited number of job runners to be active at the same time, so it's better to cancel\n# pointless jobs early so that more useful jobs can run sooner.\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.ref }}\n  cancel-in-progress: true\n\npermissions:\n  contents: read\n\njobs:\n  lint:\n    strategy:\n      matrix:\n        # Only lint on the min and max supported Python versions.\n        # It's extremely unlikely that there's a lint issue on any version in between\n        # that doesn't show up on the min or max versions.\n        #\n        # GitHub rate-limits how many jobs can be running at any one time.\n        # Starting new jobs is also relatively slow,\n        # so linting on fewer versions makes CI faster.\n        python-version:\n          - \"3.12\"\n    uses:\n      ./.github/workflows/_lint.yml\n    with:\n      working-directory: .\n      python-version: ${{ matrix.python-version }}\n    secrets: inherit\n  test:\n    strategy:\n      matrix:\n        # Only lint on the min and max supported Python versions.\n        # It's extremely unlikely that there's a lint issue on any version in between\n        # that doesn't show up on the min or max versions.\n        #\n        # GitHub rate-limits how many jobs can be running at any one time.\n        # Starting new jobs is also relatively slow,\n        # so linting on fewer versions makes CI faster.\n        python-version:\n          - \"3.10\"\n          - \"3.12\"\n    uses:\n      ./.github/workflows/_test.yml\n    with:\n      working-directory: .\n      python-version: ${{ matrix.python-version }}\n    secrets: inherit\n  ci_success:\n    name: \"CI Success\"\n    needs: [lint, test]\n    if: |\n      always()\n    runs-on: ubuntu-latest\n    env:\n      JOBS_JSON: ${{ toJSON(needs) }}\n      RESULTS_JSON: ${{ toJSON(needs.*.result) }}\n      EXIT_CODE: ${{!contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled') && '0' || '1'}}\n    steps:\n      - name: \"CI Success\"\n        run: |\n          echo $JOBS_JSON\n          echo $RESULTS_JSON\n          echo \"Exiting with $EXIT_CODE\"\n          exit $EXIT_CODE\n\n"
  },
  {
    "path": ".github/workflows/release.yml",
    "content": "name: release\nrun-name: Release ${{ inputs.working-directory }} by @${{ github.actor }}\non:\n  workflow_call:\n    inputs:\n      working-directory:\n        required: true\n        type: string\n        description: \"From which folder this pipeline executes\"\n  workflow_dispatch:\n    inputs:\n      working-directory:\n        description: \"From which folder this pipeline executes\"\n        default: \".\"\n      dangerous-nonmain-release:\n        required: false\n        type: boolean\n        default: false\n        description: \"Release from a non-main branch (danger!)\"\n\nenv:\n  PYTHON_VERSION: \"3.11\"\n  UV_FROZEN: \"true\"\n  UV_NO_SYNC: \"true\"\n\njobs:\n  build:\n    if: github.ref == 'refs/heads/main' || inputs.dangerous-nonmain-release\n    environment: Scheduled testing\n    runs-on: ubuntu-latest\n    permissions:\n      contents: read\n\n    outputs:\n      pkg-name: ${{ steps.check-version.outputs.pkg-name }}\n      version: ${{ steps.check-version.outputs.version }}\n\n    steps:\n      - uses: actions/checkout@v4\n\n      - name: Set up Python + uv\n        uses: \"./.github/actions/uv_setup\"\n        with:\n          python-version: ${{ env.PYTHON_VERSION }}\n\n      # We want to keep this build stage *separate* from the release stage,\n      # so that there's no sharing of permissions between them.\n      # The release stage has trusted publishing and GitHub repo contents write access,\n      # and we want to keep the scope of that access limited just to the release job.\n      # Otherwise, a malicious `build` step (e.g. via a compromised dependency)\n      # could get access to our GitHub or PyPI credentials.\n      #\n      # Per the trusted publishing GitHub Action:\n      # > It is strongly advised to separate jobs for building [...]\n      # > from the publish job.\n      # https://github.com/pypa/gh-action-pypi-publish#non-goals\n      - name: Build project for distribution\n        run: uv build\n      - name: Upload build\n        uses: actions/upload-artifact@v4\n        with:\n          name: dist\n          path: ${{ inputs.working-directory }}/dist/\n\n      - name: Check Version\n        id: check-version\n        shell: python\n        working-directory: ${{ inputs.working-directory }}\n        run: |\n          import os\n          import tomllib\n          with open(\"pyproject.toml\", \"rb\") as f:\n              data = tomllib.load(f)\n          pkg_name = data[\"project\"][\"name\"]\n          version = data[\"project\"][\"version\"]\n          with open(os.environ[\"GITHUB_OUTPUT\"], \"a\") as f:\n              f.write(f\"pkg-name={pkg_name}\\n\")\n              f.write(f\"version={version}\\n\")\n  publish:\n    needs:\n      - build\n    runs-on: ubuntu-latest\n    permissions:\n      # This permission is used for trusted publishing:\n      # https://blog.pypi.org/posts/2023-04-20-introducing-trusted-publishers/\n      #\n      # Trusted publishing has to also be configured on PyPI for each package:\n      # https://docs.pypi.org/trusted-publishers/adding-a-publisher/\n      id-token: write\n\n    defaults:\n      run:\n        working-directory: ${{ inputs.working-directory }}\n\n    steps:\n      - uses: actions/checkout@v4\n\n      - name: Set up Python + uv\n        uses: \"./.github/actions/uv_setup\"\n        with:\n          python-version: ${{ env.PYTHON_VERSION }}\n\n      - uses: actions/download-artifact@v4\n        with:\n          name: dist\n          path: ${{ inputs.working-directory }}/dist/\n\n      - 
name: Publish package distributions to PyPI\n        uses: pypa/gh-action-pypi-publish@release/v1\n        with:\n          packages-dir: ${{ inputs.working-directory }}/dist/\n          verbose: true\n          print-hash: true\n          # Temp workaround since attestations are on by default as of gh-action-pypi-publish v1.11.0\n          attestations: false\n\n  mark-release:\n    needs:\n      - build\n      - publish\n    runs-on: ubuntu-latest\n    permissions:\n      # This permission is needed by `ncipollo/release-action` to\n      # create the GitHub release.\n      contents: write\n\n    defaults:\n      run:\n        working-directory: ${{ inputs.working-directory }}\n\n    steps:\n      - uses: actions/checkout@v4\n\n      - name: Set up Python + uv\n        uses: \"./.github/actions/uv_setup\"\n        with:\n          python-version: ${{ env.PYTHON_VERSION }}\n\n      - uses: actions/download-artifact@v4\n        with:\n          name: dist\n          path: ${{ inputs.working-directory }}/dist/\n\n      - name: Create Tag\n        uses: ncipollo/release-action@v1\n        with:\n          artifacts: \"dist/*\"\n          token: ${{ secrets.GITHUB_TOKEN }}\n          generateReleaseNotes: true\n          tag: ${{ needs.build.outputs.pkg-name }}==${{ needs.build.outputs.version }}\n          commit: main\n          makeLatest: true\n"
  },
  {
    "path": ".gitignore",
    "content": "# Pyenv\n.python-version\n.ipynb_checkpoints/\n\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# Environments\n.venv\n.env\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n.DS_Store\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2025 LangChain, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "Makefile",
    "content": ".PHONY: all lint format test help\n\n# Default target executed when no arguments are given to make.\nall: help\n\n######################\n# TESTING AND COVERAGE\n######################\n\n# Define a variable for the test file path.\nTEST_FILE ?= tests/\n\ntest:\n\tuv run pytest -vv --disable-socket --allow-unix-socket $(TEST_FILE)\n\ntest_watch:\n\tuv run ptw . -- $(TEST_FILE)\n\n\n######################\n# LINTING AND FORMATTING\n######################\n\n# Define a variable for Python and notebook files.\nlint format: PYTHON_FILES=.\nlint_diff format_diff: PYTHON_FILES=$(shell git diff --relative=. --name-only --diff-filter=d master | grep -E '\\.py$$|\\.ipynb$$')\n\nlint lint_diff:\n\t[ \"$(PYTHON_FILES)\" = \"\" ] ||\tuv run ruff format $(PYTHON_FILES) --diff\n\t[ \"$(PYTHON_FILES)\" = \"\" ] ||\tuv run ruff check $(PYTHON_FILES) --diff\n\t[ \"$(PYTHON_FILES)\" = \"\" ] || uvx ty check $(PYTHON_FILES)\n\nformat format_diff:\n\t[ \"$(PYTHON_FILES)\" = \"\" ] || uv run ruff check --fix $(PYTHON_FILES)\n\t[ \"$(PYTHON_FILES)\" = \"\" ] || uv run ruff format $(PYTHON_FILES)\n\n\t\n\n######################\n# HELP\n######################\n\nhelp:\n\t@echo '===================='\n\t@echo '-- LINTING --'\n\t@echo 'format                       - run code formatters'\n\t@echo 'lint                         - run linters'\n\t@echo '-- TESTS --'\n\t@echo 'test                         - run unit tests'\n\t@echo 'test TEST_FILE=<test_file>   - run all tests in file'\n\t@echo '-- DOCUMENTATION tasks are from the top-level Makefile --'\n\n\n"
  },
  {
    "path": "README.md",
    "content": "# 🤖 LangGraph Multi-Agent Supervisor\n\n> **Note**: We now recommend using the **supervisor pattern directly via tools** rather than this library for most use cases. The tool-calling approach gives you more control over context engineering and is the recommended pattern in the [LangChain multi-agent guide](https://docs.langchain.com/oss/python/langchain/multi-agent). See our [supervisor tutorial](https://docs.langchain.com/oss/python/langchain/supervisor) for a step-by-step guide. We're making this library compatible with LangChain 1.0 to help users upgrade their existing code. If you find this library solves a problem that can't be easily addressed with the manual supervisor pattern, we'd love to hear about your use case!\n\nA Python library for creating hierarchical multi-agent systems using [LangGraph](https://github.com/langchain-ai/langgraph). Hierarchical systems are a type of [multi-agent](https://langchain-ai.github.io/langgraph/concepts/multi_agent) architecture where specialized agents are coordinated by a central **supervisor** agent. The supervisor controls all communication flow and task delegation, making decisions about which agent to invoke based on the current context and task requirements.\n\n## Features\n\n- 🤖 **Create a supervisor agent** to orchestrate multiple specialized agents\n- 🛠️ **Tool-based agent handoff mechanism** for communication between agents\n- 📝 **Flexible message history management** for conversation control\n\nThis library is built on top of [LangGraph](https://github.com/langchain-ai/langgraph), a powerful framework for building agent applications, and comes with out-of-box support for [streaming](https://langchain-ai.github.io/langgraph/how-tos/#streaming), [short-term and long-term memory](https://langchain-ai.github.io/langgraph/concepts/memory/) and [human-in-the-loop](https://langchain-ai.github.io/langgraph/concepts/human_in_the_loop/)\n\n## Installation\n\n```bash\npip install langgraph-supervisor\n```\n\n> [!Note]\n> LangGraph Supervisor requires Python >= 3.10\n\n## Quickstart\n\nHere's a simple example of a supervisor managing two specialized agents:\n\n![Supervisor Architecture](static/img/supervisor.png)\n\n```bash\npip install langgraph-supervisor langchain-openai\n\nexport OPENAI_API_KEY=<your_api_key>\n```\n\n```python\nfrom langchain_openai import ChatOpenAI\n\nfrom langgraph_supervisor import create_supervisor\nfrom langgraph.prebuilt import create_react_agent\n\nmodel = ChatOpenAI(model=\"gpt-4o\")\n\n# Create specialized agents\n\ndef add(a: float, b: float) -> float:\n    \"\"\"Add two numbers.\"\"\"\n    return a + b\n\ndef multiply(a: float, b: float) -> float:\n    \"\"\"Multiply two numbers.\"\"\"\n    return a * b\n\ndef web_search(query: str) -> str:\n    \"\"\"Search the web for information.\"\"\"\n    return (\n        \"Here are the headcounts for each of the FAANG companies in 2024:\\n\"\n        \"1. **Facebook (Meta)**: 67,317 employees.\\n\"\n        \"2. **Apple**: 164,000 employees.\\n\"\n        \"3. **Amazon**: 1,551,000 employees.\\n\"\n        \"4. **Netflix**: 14,000 employees.\\n\"\n        \"5. **Google (Alphabet)**: 181,269 employees.\"\n    )\n\nmath_agent = create_react_agent(\n    model=model,\n    tools=[add, multiply],\n    name=\"math_expert\",\n    prompt=\"You are a math expert. 
Always use one tool at a time.\"\n)\n\nresearch_agent = create_react_agent(\n    model=model,\n    tools=[web_search],\n    name=\"research_expert\",\n    prompt=\"You are a world class researcher with access to web search. Do not do any math.\"\n)\n\n# Create supervisor workflow\nworkflow = create_supervisor(\n    [research_agent, math_agent],\n    model=model,\n    prompt=(\n        \"You are a team supervisor managing a research expert and a math expert. \"\n        \"For current events, use research_agent. \"\n        \"For math problems, use math_agent.\"\n    )\n)\n\n# Compile and run\napp = workflow.compile()\nresult = app.invoke({\n    \"messages\": [\n        {\n            \"role\": \"user\",\n            \"content\": \"what's the combined headcount of the FAANG companies in 2024?\"\n        }\n    ]\n})\n```\n\n> [!TIP]\n> For developing, debugging, and deploying AI agents and LLM applications, see [LangSmith](https://docs.langchain.com/langsmith/home).\n\n## Message History Management\n\nYou can control how messages from worker agents are added to the overall conversation history of the multi-agent system:\n\nInclude full message history from an agent:\n\n![Full History](static/img/full_history.png)\n\n```python\nworkflow = create_supervisor(\n    agents=[agent1, agent2],\n    output_mode=\"full_history\"\n)\n```\n\nInclude only the final agent response:\n\n![Last Message](static/img/last_message.png)\n\n```python\nworkflow = create_supervisor(\n    agents=[agent1, agent2],\n    output_mode=\"last_message\"\n)\n```\n\n## Multi-level Hierarchies\n\nYou can create multi-level hierarchical systems by creating a supervisor that manages multiple supervisors.\n\n```python\nresearch_team = create_supervisor(\n    [research_agent, math_agent],\n    model=model,\n    supervisor_name=\"research_supervisor\"\n).compile(name=\"research_team\")\n\nwriting_team = create_supervisor(\n    [writing_agent, publishing_agent],\n    model=model,\n    supervisor_name=\"writing_supervisor\"\n).compile(name=\"writing_team\")\n\ntop_level_supervisor = create_supervisor(\n    [research_team, writing_team],\n    model=model,\n    supervisor_name=\"top_level_supervisor\"\n).compile(name=\"top_level_supervisor\")\n```\n\n## Adding Memory\n\nYou can add [short-term](https://langchain-ai.github.io/langgraph/how-tos/persistence/) and [long-term](https://langchain-ai.github.io/langgraph/how-tos/cross-thread-persistence/) [memory](https://langchain-ai.github.io/langgraph/concepts/memory/) to your supervisor multi-agent system. 
Since `create_supervisor()` returns an instance of `StateGraph` that needs to be compiled before use, you can directly pass a [checkpointer](https://langchain-ai.github.io/langgraph/reference/checkpoints/#langgraph.checkpoint.base.BaseCheckpointSaver) or a [store](https://langchain-ai.github.io/langgraph/reference/store/#langgraph.store.base.BaseStore) instance to the `.compile()` method:\n\n```python\nfrom langgraph.checkpoint.memory import InMemorySaver\nfrom langgraph.store.memory import InMemoryStore\n\ncheckpointer = InMemorySaver()\nstore = InMemoryStore()\n\nmodel = ...\nresearch_agent = ...\nmath_agent = ...\n\nworkflow = create_supervisor(\n    [research_agent, math_agent],\n    model=model,\n    prompt=\"You are a team supervisor managing a research expert and a math expert.\",\n)\n\n# Compile with checkpointer/store\napp = workflow.compile(\n    checkpointer=checkpointer,\n    store=store\n)\n```\n\n## How to customize\n\n### Customizing handoff tools\n\nBy default, the supervisor uses handoff tools created with the prebuilt `create_handoff_tool`. You can also create your own, custom handoff tools. Here are some ideas on how you can modify the default implementation:\n\n* change tool name and/or description\n* add tool call arguments for the LLM to populate, for example a task description for the next agent\n* change what data is passed to the subagent as part of the handoff: by default `create_handoff_tool` passes **full** message history (all of the messages generated in the supervisor up to this point), as well as a tool message indicating successful handoff.\n\nHere is an example of how to pass customized handoff tools to `create_supervisor`:\n\n```python\nfrom langgraph_supervisor import create_handoff_tool\nworkflow = create_supervisor(\n    [research_agent, math_agent],\n    tools=[\n        create_handoff_tool(agent_name=\"math_expert\", name=\"assign_to_math_expert\", description=\"Assign task to math expert\"),\n        create_handoff_tool(agent_name=\"research_expert\", name=\"assign_to_research_expert\", description=\"Assign task to research expert\")\n    ],\n    model=model,\n)\n```\n\nYou can also control whether the handoff tool invocation messages are added to the state. 
By default, they are added (`add_handoff_messages=True`), but you can disable this if you want a more concise history:\n\n```python\nworkflow = create_supervisor(\n    [research_agent, math_agent],\n    model=model,\n    add_handoff_messages=False\n)\n```\n\nAdditionally, you can customize the prefix used for the automatically generated handoff tools:\n\n```python\nworkflow = create_supervisor(\n    [research_agent, math_agent],\n    model=model,\n    handoff_tool_prefix=\"delegate_to\"\n)\n# This will create tools named: delegate_to_research_expert, delegate_to_math_expert\n```\n\nHere is an example of what a custom handoff tool might look like:\n\n```python\nfrom typing import Annotated\n\nfrom langchain_core.tools import tool, BaseTool, InjectedToolCallId\nfrom langchain_core.messages import ToolMessage\nfrom langgraph.types import Command\nfrom langgraph.prebuilt import InjectedState\nfrom langgraph_supervisor.handoff import METADATA_KEY_HANDOFF_DESTINATION\n\ndef create_custom_handoff_tool(*, agent_name: str, name: str | None, description: str | None) -> BaseTool:\n\n    @tool(name, description=description)\n    def handoff_to_agent(\n        # you can add additional tool call arguments for the LLM to populate\n        # for example, you can ask the LLM to populate a task description for the next agent\n        task_description: Annotated[str, \"Detailed description of what the next agent should do, including all of the relevant context.\"],\n        # you can inject the state of the agent that is calling the tool\n        state: Annotated[dict, InjectedState],\n        tool_call_id: Annotated[str, InjectedToolCallId],\n    ):\n        tool_message = ToolMessage(\n            content=f\"Successfully transferred to {agent_name}\",\n            name=name,\n            tool_call_id=tool_call_id,\n        )\n        messages = state[\"messages\"]\n        return Command(\n            goto=agent_name,\n            graph=Command.PARENT,\n            # NOTE: this is a state update that will be applied to the swarm multi-agent graph (i.e., the PARENT graph)\n            update={\n                \"messages\": messages + [tool_message],\n                \"active_agent\": agent_name,\n                # optionally pass the task description to the next agent\n                # NOTE: individual agents would need to have `task_description` in their state schema\n                # and would need to implement logic for how to consume it\n                \"task_description\": task_description,\n            },\n        )\n\n    handoff_to_agent.metadata = {METADATA_KEY_HANDOFF_DESTINATION: agent_name}\n    return handoff_to_agent\n```\n\n### Message Forwarding\n\nYou can equip the supervisor with a tool to directly forward the last message received from a worker agent straight to the final output of the graph using `create_forward_message_tool`. This is useful when the supervisor determines that the worker's response is sufficient and doesn't require further processing or summarization by the supervisor itself. 
It saves tokens for the supervisor and avoids potential misrepresentation of the worker's response through paraphrasing.\n\n```python\nfrom langgraph_supervisor.handoff import create_forward_message_tool\n\n# Assume research_agent and math_agent are defined as before\n\nforwarding_tool = create_forward_message_tool(\"supervisor\") # The argument is the name to assign to the resulting forwarded message\nworkflow = create_supervisor(\n    [research_agent, math_agent],\n    model=model,\n    # Pass the forwarding tool along with any other custom or default handoff tools\n    tools=[forwarding_tool]\n)\n```\n\nThis creates a tool named `forward_message` that the supervisor can invoke. The tool expects an argument `from_agent` specifying which agent's last message should be forwarded directly to the output.\n\n## Using Functional API \n\nHere's a simple example of a supervisor managing two specialized agentic workflows created using Functional API:\n\n```bash\npip install langgraph-supervisor langchain-openai\n\nexport OPENAI_API_KEY=<your_api_key>\n```\n\n```python\nfrom langgraph.prebuilt import create_react_agent\nfrom langgraph_supervisor import create_supervisor\n\nfrom langchain_openai import ChatOpenAI\n\nfrom langgraph.func import entrypoint, task\nfrom langgraph.graph import add_messages\n\nmodel = ChatOpenAI(model=\"gpt-4o\")\n\n# Create specialized agents\n\n# Functional API - Agent 1 (Joke Generator)\n@task\ndef generate_joke(messages):\n    \"\"\"First LLM call to generate initial joke\"\"\"\n    system_message = {\n        \"role\": \"system\", \n        \"content\": \"Write a short joke\"\n    }\n    msg = model.invoke(\n        [system_message] + messages\n    )\n    return msg\n\n@entrypoint()\ndef joke_agent(state):\n    joke = generate_joke(state['messages']).result()\n    messages = add_messages(state[\"messages\"], [joke])\n    return {\"messages\": messages}\n\njoke_agent.name = \"joke_agent\"\n\n# Graph API - Agent 2 (Research Expert)\ndef web_search(query: str) -> str:\n    \"\"\"Search the web for information.\"\"\"\n    return (\n        \"Here are the headcounts for each of the FAANG companies in 2024:\\n\"\n        \"1. **Facebook (Meta)**: 67,317 employees.\\n\"\n        \"2. **Apple**: 164,000 employees.\\n\"\n        \"3. **Amazon**: 1,551,000 employees.\\n\"\n        \"4. **Netflix**: 14,000 employees.\\n\"\n        \"5. **Google (Alphabet)**: 181,269 employees.\"\n    )\n\nresearch_agent = create_react_agent(\n    model=model,\n    tools=[web_search],\n    name=\"research_expert\",\n    prompt=\"You are a world class researcher with access to web search. Do not do any math.\"\n)\n\n# Create supervisor workflow\nworkflow = create_supervisor(\n    [research_agent, joke_agent],\n    model=model,\n    prompt=(\n        \"You are a team supervisor managing a research expert and a joke expert. \"\n        \"For current events, use research_agent. \"\n        \"For any jokes, use joke_agent.\"\n    )\n)\n\n# Compile and run\napp = workflow.compile()\nresult = app.invoke({\n    \"messages\": [\n        {\n            \"role\": \"user\",\n            \"content\": \"Share a joke to relax and start vibe coding for my next project idea.\"\n        }\n    ]\n})\n\nfor m in result[\"messages\"]:\n    m.pretty_print()\n```\n"
  },
  {
    "path": "langgraph_supervisor/__init__.py",
    "content": "from langgraph_supervisor.handoff import (\n    create_forward_message_tool,\n    create_handoff_tool,\n)\nfrom langgraph_supervisor.supervisor import create_supervisor\n\n__all__ = [\"create_supervisor\", \"create_handoff_tool\", \"create_forward_message_tool\"]\n"
  },
  {
    "path": "langgraph_supervisor/agent_name.py",
    "content": "import re\nfrom typing import Any, Literal, Sequence, TypeGuard, cast\n\nfrom langchain_core.language_models import LanguageModelLike\nfrom langchain_core.messages import (\n    AIMessage,\n    BaseMessage,\n    MessageLikeRepresentation,\n    convert_to_messages,\n)\nfrom langchain_core.prompt_values import PromptValue\nfrom langchain_core.runnables import RunnableLambda\n\nNAME_PATTERN = re.compile(r\"<name>(.*?)</name>\", re.DOTALL)\nCONTENT_PATTERN = re.compile(r\"<content>(.*?)</content>\", re.DOTALL)\n\nAgentNameMode = Literal[\"inline\"]\n\n\ndef _is_content_blocks_content(content: list[dict | str] | str) -> TypeGuard[list[dict]]:\n    return (\n        isinstance(content, list)\n        and len(content) > 0\n        and isinstance(content[0], dict)\n        and \"type\" in content[0]\n    )\n\n\ndef add_inline_agent_name(message: BaseMessage) -> BaseMessage:\n    \"\"\"Add name and content XML tags to the message content.\n\n    Examples:\n\n        >>> add_inline_agent_name(AIMessage(content=\"Hello\", name=\"assistant\"))\n        AIMessage(content=\"<name>assistant</name><content>Hello</content>\", name=\"assistant\")\n\n        >>> add_inline_agent_name(AIMessage(content=[{\"type\": \"text\", \"text\": \"Hello\"}], name=\"assistant\"))\n        AIMessage(content=[{\"type\": \"text\", \"text\": \"<name>assistant</name><content>Hello</content>\"}], name=\"assistant\")\n    \"\"\"\n    if not isinstance(message, AIMessage) or not message.name:\n        return message\n\n    formatted_message = message.model_copy()\n    if _is_content_blocks_content(message.content):\n        text_blocks = [block for block in message.content if block[\"type\"] == \"text\"]\n        non_text_blocks = [block for block in message.content if block[\"type\"] != \"text\"]\n        content = text_blocks[0][\"text\"] if text_blocks else \"\"\n        formatted_content = f\"<name>{message.name}</name><content>{content}</content>\"\n        formatted_message_content = [{\"type\": \"text\", \"text\": formatted_content}] + non_text_blocks\n        formatted_message.content = formatted_message_content\n    else:\n        formatted_message.content = (\n            f\"<name>{message.name}</name><content>{formatted_message.content}</content>\"\n        )\n    return formatted_message\n\n\ndef remove_inline_agent_name(message: BaseMessage) -> BaseMessage:\n    \"\"\"Remove explicit name and content XML tags from the AI message content.\n\n    Examples:\n\n        >>> remove_inline_agent_name(AIMessage(content=\"<name>assistant</name><content>Hello</content>\", name=\"assistant\"))\n        AIMessage(content=\"Hello\", name=\"assistant\")\n\n        >>> remove_inline_agent_name(AIMessage(content=[{\"type\": \"text\", \"text\": \"<name>assistant</name><content>Hello</content>\"}], name=\"assistant\"))\n        AIMessage(content=[{\"type\": \"text\", \"text\": \"Hello\"}], name=\"assistant\")\n    \"\"\"\n    if not isinstance(message, AIMessage) or not message.content:\n        return message\n\n    if is_content_blocks_content := _is_content_blocks_content(message.content):\n        text_blocks = [\n            block\n            for block in message.content\n            if isinstance(block, dict) and block[\"type\"] == \"text\"\n        ]\n        if not text_blocks:\n            return message\n\n        non_text_blocks = [\n            block\n            for block in message.content\n            if isinstance(block, dict) and block[\"type\"] != \"text\"\n        ]\n        content = cast(dict[str, 
Any], text_blocks[0])[\"text\"]\n    else:\n        content = message.content\n\n    name_match: re.Match | None = NAME_PATTERN.search(content)\n    content_match: re.Match | None = CONTENT_PATTERN.search(content)\n    if not name_match or not content_match:\n        return message\n\n    parsed_content = content_match.group(1)\n    parsed_message = message.model_copy()\n    if is_content_blocks_content:\n        content_blocks = non_text_blocks\n        if parsed_content:\n            content_blocks = [{\"type\": \"text\", \"text\": parsed_content}] + content_blocks\n\n        parsed_message.content = cast(list[str | dict], content_blocks)\n    else:\n        parsed_message.content = parsed_content\n    return parsed_message\n\n\ndef with_agent_name(\n    model: LanguageModelLike,\n    agent_name_mode: AgentNameMode,\n) -> LanguageModelLike:\n    \"\"\"Attach formatted agent names to the messages passed to and from a language model.\n\n    This is useful for making a message history with multiple agents more coherent.\n\n    NOTE: agent name is consumed from the message.name field.\n        If you're using an agent built with create_react_agent, name is automatically set.\n        If you're building a custom agent, make sure to set the name on the AI message returned by the LLM.\n\n    Args:\n        model: Language model to add agent name formatting to.\n        agent_name_mode: Use to specify how to expose the agent name to the LLM.\n            - \"inline\": Add the agent name directly into the content field of the AI message using XML-style tags.\n                Example: \"How can I help you\" -> \"<name>agent_name</name><content>How can I help you?</content>\".\n    \"\"\"\n    if agent_name_mode == \"inline\":\n        process_input_message = add_inline_agent_name\n        process_output_message = remove_inline_agent_name\n    else:\n        raise ValueError(\n            f\"Invalid agent name mode: {agent_name_mode}. Needs to be one of: {AgentNameMode.__args__}\"\n        )\n\n    def process_input_messages(\n        input: Sequence[MessageLikeRepresentation] | PromptValue,\n    ) -> list[BaseMessage]:\n        messages = convert_to_messages(input)\n        return [process_input_message(message) for message in messages]\n\n    chain = (\n        process_input_messages\n        | model\n        | RunnableLambda(process_output_message, name=\"process_output_message\")\n    )\n\n    return cast(LanguageModelLike, chain)\n"
  },
  {
    "path": "langgraph_supervisor/handoff.py",
    "content": "import re\nimport uuid\nfrom typing import TypeGuard, cast\n\nfrom langchain_core.messages import AIMessage, ToolCall, ToolMessage\nfrom langchain_core.tools import BaseTool, InjectedToolCallId, tool\nfrom langgraph.prebuilt import InjectedState\nfrom langgraph.types import Command, Send\nfrom typing_extensions import Annotated\n\nWHITESPACE_RE = re.compile(r\"\\s+\")\nMETADATA_KEY_HANDOFF_DESTINATION = \"__handoff_destination\"\nMETADATA_KEY_IS_HANDOFF_BACK = \"__is_handoff_back\"\n\n\ndef _normalize_agent_name(agent_name: str) -> str:\n    \"\"\"Normalize an agent name to be used inside the tool name.\"\"\"\n    return WHITESPACE_RE.sub(\"_\", agent_name.strip()).lower()\n\n\ndef _has_multiple_content_blocks(content: str | list[str | dict]) -> TypeGuard[list[dict]]:\n    \"\"\"Check if content contains multiple content blocks.\"\"\"\n    return isinstance(content, list) and len(content) > 1 and isinstance(content[0], dict)\n\n\ndef _remove_non_handoff_tool_calls(\n    last_ai_message: AIMessage, handoff_tool_call_id: str\n) -> AIMessage:\n    \"\"\"Remove tool calls that are not meant for the agent.\"\"\"\n    # if the supervisor is calling multiple agents/tools in parallel,\n    # we need to remove tool calls that are not meant for this agent\n    # to ensure that the resulting message history is valid\n    content = last_ai_message.content\n    if _has_multiple_content_blocks(content):\n        content = [\n            content_block\n            for content_block in content\n            if (content_block[\"type\"] == \"tool_use\" and content_block[\"id\"] == handoff_tool_call_id)\n            or content_block[\"type\"] != \"tool_use\"\n        ]\n\n    last_ai_message = AIMessage(\n        content=content,\n        tool_calls=[\n            tool_call\n            for tool_call in last_ai_message.tool_calls\n            if tool_call[\"id\"] == handoff_tool_call_id\n        ],\n        name=last_ai_message.name,\n        id=str(uuid.uuid4()),\n    )\n    return last_ai_message\n\n\ndef create_handoff_tool(\n    *,\n    agent_name: str,\n    name: str | None = None,\n    description: str | None = None,\n    add_handoff_messages: bool = True,\n) -> BaseTool:\n    \"\"\"Create a tool that can handoff control to the requested agent.\n\n    Args:\n        agent_name: The name of the agent to handoff control to, i.e. 
the name of the\n            agent node in the multi-agent graph.\n\n            Agent names should be simple, clear and unique, preferably in snake_case,\n            although you are only limited to the names accepted by LangGraph\n            nodes as well as the tool names accepted by LLM providers\n            (the tool name will look like this: `transfer_to_<agent_name>`).\n        name: Optional name of the tool to use for the handoff.\n\n            If not provided, the tool name will be `transfer_to_<agent_name>`.\n        description: Optional description for the handoff tool.\n\n            If not provided, the description will be `Ask agent <agent_name> for help`.\n        add_handoff_messages: Whether to add handoff messages to the message history.\n\n            If `False`, the handoff messages will be omitted from the message history.\n    \"\"\"\n    if name is None:\n        name = f\"transfer_to_{_normalize_agent_name(agent_name)}\"\n\n    if description is None:\n        description = f\"Ask agent '{agent_name}' for help\"\n\n    @tool(name, description=description)\n    def handoff_to_agent(\n        state: Annotated[dict, InjectedState],\n        tool_call_id: Annotated[str, InjectedToolCallId],\n    ) -> Command:\n        tool_message = ToolMessage(\n            content=f\"Successfully transferred to {agent_name}\",\n            name=name,\n            tool_call_id=tool_call_id,\n            response_metadata={METADATA_KEY_HANDOFF_DESTINATION: agent_name},\n        )\n        last_ai_message = cast(AIMessage, state[\"messages\"][-1])\n        # Handle parallel handoffs\n        if len(last_ai_message.tool_calls) > 1:\n            handoff_messages = state[\"messages\"][:-1]\n            if add_handoff_messages:\n                handoff_messages.extend(\n                    (\n                        _remove_non_handoff_tool_calls(last_ai_message, tool_call_id),\n                        tool_message,\n                    )\n                )\n            return Command(\n                graph=Command.PARENT,\n                # NOTE: we are using Send here to allow the ToolNode in langgraph.prebuilt\n                # to handle parallel handoffs by combining all Send commands into a single command\n                goto=[Send(agent_name, {**state, \"messages\": handoff_messages})],\n            )\n        # Handle single handoff\n        else:\n            if add_handoff_messages:\n                handoff_messages = state[\"messages\"] + [tool_message]\n            else:\n                handoff_messages = state[\"messages\"][:-1]\n            return Command(\n                goto=agent_name,\n                graph=Command.PARENT,\n                update={**state, \"messages\": handoff_messages},\n            )\n\n    handoff_to_agent.metadata = {METADATA_KEY_HANDOFF_DESTINATION: agent_name}\n    return handoff_to_agent\n\n\ndef create_handoff_back_messages(\n    agent_name: str, supervisor_name: str\n) -> tuple[AIMessage, ToolMessage]:\n    \"\"\"Create a pair of (AIMessage, ToolMessage) to add to the message history when returning control to the supervisor.\"\"\"\n    tool_call_id = str(uuid.uuid4())\n    tool_name = f\"transfer_back_to_{_normalize_agent_name(supervisor_name)}\"\n    tool_calls = [ToolCall(name=tool_name, args={}, id=tool_call_id)]\n    return (\n        AIMessage(\n            content=f\"Transferring back to {supervisor_name}\",\n            tool_calls=tool_calls,\n            name=agent_name,\n            
response_metadata={METADATA_KEY_IS_HANDOFF_BACK: True},\n        ),\n        ToolMessage(\n            content=f\"Successfully transferred back to {supervisor_name}\",\n            name=tool_name,\n            tool_call_id=tool_call_id,\n            response_metadata={METADATA_KEY_IS_HANDOFF_BACK: True},\n        ),\n    )\n\n\ndef create_forward_message_tool(supervisor_name: str = \"supervisor\") -> BaseTool:\n    \"\"\"Create a tool the supervisor can use to forward a worker message by name.\n\n    This helps avoid information loss any time the supervisor rewrites a worker query\n    to the user and also can save some tokens.\n\n    Args:\n        supervisor_name: The name of the supervisor node (used for namespacing the tool).\n\n    Returns:\n        BaseTool: The `'forward_message'` tool.\n    \"\"\"\n    tool_name = \"forward_message\"\n    desc = (\n        \"Forwards the latest message from the specified agent to the user\"\n        \" without any changes. Use this to preserve information fidelity, avoid\"\n        \" misinterpretation of questions or responses, and save time.\"\n    )\n\n    @tool(tool_name, description=desc)\n    def forward_message(\n        from_agent: str,\n        state: Annotated[dict, InjectedState],\n    ) -> str | Command:\n        target_message = next(\n            (\n                m\n                for m in reversed(state[\"messages\"])\n                if isinstance(m, AIMessage)\n                and (m.name or \"\").lower() == from_agent.lower()\n                and not m.response_metadata.get(METADATA_KEY_IS_HANDOFF_BACK)\n            ),\n            None,\n        )\n        if not target_message:\n            found_names = set(\n                m.name for m in state[\"messages\"] if isinstance(m, AIMessage) and m.name\n            )\n            return (\n                f\"Could not find message from source agent {from_agent}. Found names: {found_names}\"\n            )\n        updates = [\n            AIMessage(\n                content=target_message.content,\n                name=supervisor_name,\n                id=str(uuid.uuid4()),\n            ),\n        ]\n\n        return Command(\n            graph=Command.PARENT,\n            # NOTE: this does nothing.\n            goto=\"__end__\",\n            # we also propagate the update to make sure the handoff messages are applied\n            # to the parent graph's state\n            update={**state, \"messages\": updates},\n        )\n\n    return forward_message\n"
  },
  {
    "path": "langgraph_supervisor/py.typed",
    "content": ""
  },
  {
    "path": "langgraph_supervisor/supervisor.py",
    "content": "import inspect\nfrom typing import Any, Callable, Literal, Optional, Sequence, Type, Union, cast, get_args\nfrom uuid import UUID, uuid5\nfrom warnings import warn\n\nfrom langchain_core.language_models import BaseChatModel, LanguageModelLike\nfrom langchain_core.messages import AnyMessage, ToolMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.tools import BaseTool\nfrom langgraph._internal._config import patch_configurable\nfrom langgraph._internal._runnable import RunnableCallable, RunnableLike\nfrom langgraph._internal._typing import DeprecatedKwargs\nfrom langgraph.graph import END, START, StateGraph\nfrom langgraph.graph.message import add_messages\nfrom langgraph.prebuilt import ToolNode\nfrom langgraph.prebuilt.chat_agent_executor import (\n    AgentState,  # type: ignore[deprecated]\n    AgentStateWithStructuredResponse,  # type: ignore[deprecated]\n    Prompt,\n    StateSchemaType,\n    StructuredResponseSchema,\n    _should_bind_tools,\n    create_react_agent,  # type: ignore[deprecated]\n)\nfrom langgraph.pregel import Pregel\nfrom langgraph.pregel.remote import RemoteGraph\nfrom typing_extensions import Annotated, TypedDict, Unpack\n\nfrom langgraph_supervisor.agent_name import AgentNameMode, with_agent_name\nfrom langgraph_supervisor.handoff import (\n    METADATA_KEY_HANDOFF_DESTINATION,\n    _normalize_agent_name,\n    create_handoff_back_messages,\n    create_handoff_tool,\n)\n\nOutputMode = Literal[\"full_history\", \"last_message\"]\n\"\"\"Mode for adding agent outputs to the message history in the multi-agent workflow\n\n- `full_history`: add the entire agent message history\n- `last_message`: add only the last message\n\"\"\"\n\n\nMODELS_NO_PARALLEL_TOOL_CALLS = {\"o3-mini\", \"o3\", \"o4-mini\"}\n\n\ndef _supports_disable_parallel_tool_calls(model: LanguageModelLike) -> bool:\n    if not isinstance(model, BaseChatModel):\n        return False\n\n    if (\n        model_name := getattr(model, \"model_name\", None)\n    ) and model_name in MODELS_NO_PARALLEL_TOOL_CALLS:\n        return False\n\n    if not hasattr(model, \"bind_tools\"):\n        return False\n\n    if \"parallel_tool_calls\" not in inspect.signature(model.bind_tools).parameters:\n        return False\n\n    return True\n\n\ndef _make_call_agent(\n    agent: Pregel[Any],\n    output_mode: OutputMode,\n    add_handoff_back_messages: bool,\n    supervisor_name: str,\n) -> RunnableCallable:\n    if output_mode not in get_args(OutputMode):\n        raise ValueError(\n            f\"Invalid agent output mode: {output_mode}. Needs to be one of {get_args(OutputMode)}\"\n        )\n\n    def _process_output(output: dict) -> dict:\n        messages = output[\"messages\"]\n        if output_mode == \"full_history\":\n            pass\n        elif output_mode == \"last_message\":\n            if isinstance(messages[-1], ToolMessage):\n                messages = messages[-2:]\n            else:\n                messages = messages[-1:]\n\n        else:\n            raise ValueError(\n                f\"Invalid agent output mode: {output_mode}. 
\"\n                f\"Needs to be one of {OutputMode.__args__}\"\n            )\n\n        if add_handoff_back_messages:\n            messages.extend(create_handoff_back_messages(agent.name, supervisor_name))\n\n        return {\n            **output,\n            \"messages\": messages,\n        }\n\n    def call_agent(state: dict, config: RunnableConfig) -> dict:\n        thread_id = config.get(\"configurable\", {}).get(\"thread_id\")\n        output = agent.invoke(\n            state,\n            patch_configurable(\n                config,\n                {\"thread_id\": str(uuid5(UUID(str(thread_id)), agent.name)) if thread_id else None},\n            )\n            if isinstance(agent, RemoteGraph)\n            else config,\n        )\n        return _process_output(output)\n\n    async def acall_agent(state: dict, config: RunnableConfig) -> dict:\n        thread_id = config.get(\"configurable\", {}).get(\"thread_id\")\n        output = await agent.ainvoke(\n            state,\n            patch_configurable(\n                config,\n                {\"thread_id\": str(uuid5(UUID(str(thread_id)), agent.name)) if thread_id else None},\n            )\n            if isinstance(agent, RemoteGraph)\n            else config,\n        )\n        return _process_output(output)\n\n    return RunnableCallable(call_agent, acall_agent)\n\n\ndef _get_handoff_destinations(tools: Sequence[BaseTool | Callable]) -> list[str]:\n    \"\"\"Extract handoff destinations from provided tools.\n    Args:\n        tools: List of tools to inspect.\n    Returns:\n        List of agent names that are handoff destinations.\n    \"\"\"\n    return [\n        tool.metadata[METADATA_KEY_HANDOFF_DESTINATION]\n        for tool in tools\n        if isinstance(tool, BaseTool)\n        and tool.metadata is not None\n        and METADATA_KEY_HANDOFF_DESTINATION in tool.metadata\n    ]\n\n\ndef _prepare_tool_node(\n    tools: list[BaseTool | Callable] | ToolNode | None,\n    handoff_tool_prefix: Optional[str],\n    add_handoff_messages: bool,\n    agent_names: set[str],\n) -> ToolNode:\n    \"\"\"Prepare the ToolNode to use in supervisor agent.\"\"\"\n    if isinstance(tools, ToolNode):\n        input_tool_node = tools\n        tool_classes = list(tools.tools_by_name.values())\n    elif tools:\n        input_tool_node = ToolNode(tools)\n        # get the tool functions wrapped in a tool class from the ToolNode\n        tool_classes = list(input_tool_node.tools_by_name.values())\n    else:\n        input_tool_node = None\n        tool_classes = []\n\n    handoff_destinations = _get_handoff_destinations(tool_classes)\n    if handoff_destinations:\n        if missing_handoff_destinations := set(agent_names) - set(handoff_destinations):\n            raise ValueError(\n                \"When providing custom handoff tools, you must provide them for all subagents. 
\"\n                f\"Missing handoff tools for agents '{missing_handoff_destinations}'.\"\n            )\n\n        # Handoff tools should be already provided here\n        tool_node = cast(ToolNode, input_tool_node)\n    else:\n        handoff_tools = [\n            create_handoff_tool(\n                agent_name=agent_name,\n                name=(\n                    None\n                    if handoff_tool_prefix is None\n                    else f\"{handoff_tool_prefix}{_normalize_agent_name(agent_name)}\"\n                ),\n                add_handoff_messages=add_handoff_messages,\n            )\n            for agent_name in agent_names\n        ]\n        all_tools = tool_classes + list(handoff_tools)\n\n        # re-wrap the combined tools in a ToolNode\n        # if the original input was a ToolNode, apply the same params\n        if input_tool_node is not None:\n            tool_node = ToolNode(\n                all_tools,\n                name=str(input_tool_node.name),\n                tags=list(input_tool_node.tags) if input_tool_node.tags else None,\n                handle_tool_errors=input_tool_node._handle_tool_errors,\n                messages_key=input_tool_node._messages_key,\n            )\n        else:\n            tool_node = ToolNode(all_tools)\n\n    return tool_node\n\n\nclass _OuterState(TypedDict):\n    \"\"\"The state of the supervisor workflow.\"\"\"\n\n    messages: Annotated[Sequence[AnyMessage], add_messages]\n\n\ndef create_supervisor(\n    agents: list[Pregel],\n    *,\n    model: LanguageModelLike,\n    tools: list[BaseTool | Callable] | ToolNode | None = None,\n    prompt: Prompt | None = None,\n    response_format: Optional[\n        Union[StructuredResponseSchema, tuple[str, StructuredResponseSchema]]\n    ] = None,\n    pre_model_hook: Optional[RunnableLike] = None,\n    post_model_hook: Optional[RunnableLike] = None,\n    parallel_tool_calls: bool = False,\n    state_schema: StateSchemaType | None = None,\n    context_schema: Type[Any] | None = None,\n    output_mode: OutputMode = \"last_message\",\n    add_handoff_messages: bool = True,\n    handoff_tool_prefix: Optional[str] = None,\n    add_handoff_back_messages: Optional[bool] = None,\n    supervisor_name: str = \"supervisor\",\n    include_agent_name: AgentNameMode | None = None,\n    **deprecated_kwargs: Unpack[DeprecatedKwargs],\n) -> StateGraph:\n    \"\"\"Create a multi-agent supervisor.\n\n    Args:\n        agents: List of agents to manage.\n\n            An agent can be a LangGraph [`CompiledStateGraph`][langgraph.graph.state.CompiledStateGraph],\n            a functional API workflow, or any other [Pregel][langgraph.pregel.Pregel]\n            object.\n        model: Language model to use for the supervisor\n        tools: Tools to use for the supervisor\n        prompt: Optional prompt to use for the supervisor.\n\n            Can be one of:\n\n            - `str`: This is converted to a `SystemMessage` and added to the beginning of the list of messages in `state[\"messages\"]`.\n            - `SystemMessage`: this is added to the beginning of the list of messages in `state[\"messages\"]`.\n            - `Callable`: This function should take in full graph state and the output is then passed to the language model.\n            - `Runnable`: This runnable should take in full graph state and the output is then passed to the language model.\n        response_format: An optional schema for the final supervisor output.\n\n            If provided, output will be formatted to match the 
given schema and returned in the `'structured_response'` state key.\n\n            If not provided, `structured_response` will not be present in the output state.\n\n            Can be passed in as:\n\n            - An OpenAI function/tool schema,\n            - A JSON Schema,\n            - A TypedDict class,\n            - A Pydantic class.\n            - A tuple `(prompt, schema)`, where schema is one of the above.\n                The prompt will be used together with the model that is being used to generate the structured response.\n\n            !!! Important\n                `response_format` requires the model to support `.with_structured_output`\n\n            !!! Note\n                `response_format` requires `structured_response` key in your state schema.\n\n                You can use the prebuilt `langgraph.prebuilt.chat_agent_executor.AgentStateWithStructuredResponse`.\n        pre_model_hook: An optional node to add before the LLM node in the supervisor agent (i.e., the node that calls the LLM).\n\n            Useful for managing long message histories (e.g., message trimming, summarization, etc.).\n\n            Pre-model hook must be a callable or a runnable that takes in current graph state and returns a state update in the form of\n\n            ```python\n            # At least one of `messages` or `llm_input_messages` MUST be provided\n            {\n                # If provided, will UPDATE the `messages` in the state\n                \"messages\": [RemoveMessage(id=REMOVE_ALL_MESSAGES), ...],\n                # If provided, will be used as the input to the LLM,\n                # and will NOT UPDATE `messages` in the state\n                \"llm_input_messages\": [...],\n                # Any other state keys that need to be propagated\n                ...\n            }\n            ```\n\n            !!! Important\n                At least one of `messages` or `llm_input_messages` MUST be provided and will be used as an input to the `agent` node.\n                The rest of the keys will be added to the graph state.\n\n            !!! Warning\n                If you are returning `messages` in the pre-model hook, you should OVERWRITE the `messages` key by doing the following:\n\n                ```python\n                {\n                    \"messages\": [RemoveMessage(id=REMOVE_ALL_MESSAGES), *new_messages]\n                    ...\n                }\n                ```\n        post_model_hook: An optional node to add after the LLM node in the supervisor agent (i.e., the node that calls the LLM).\n\n            Useful for implementing human-in-the-loop, guardrails, validation, or other post-processing.\n\n            Post-model hook must be a callable or a runnable that takes in current graph state and returns a state update.\n        parallel_tool_calls: Whether to allow the supervisor LLM to call tools in parallel (only OpenAI and Anthropic).\n\n            Use this to control whether the supervisor can hand off to multiple agents at once.\n\n            If `True`, will enable parallel tool calls.\n\n            If `False`, will disable parallel tool calls.\n\n            !!! 
Important\n                This is currently supported only by OpenAI and Anthropic models.\n                To control parallel tool calling for other providers, add explicit instructions for tool use to the system prompt.\n        state_schema: State schema to use for the supervisor graph.\n        context_schema: Specifies the schema for the context object that will be passed to the workflow.\n        output_mode: Mode for adding managed agents' outputs to the message history in the multi-agent workflow.\n\n            Can be one of:\n\n            - `full_history`: Add the entire agent message history\n            - `last_message`: Add only the last message\n        add_handoff_messages: Whether to add a pair of `(AIMessage, ToolMessage)` to the message history\n            when a handoff occurs.\n        handoff_tool_prefix: Optional prefix for the handoff tools (e.g., `'delegate_to_'` or `'transfer_to_'`)\n\n            If provided, the handoff tools will be named `handoff_tool_prefix_agent_name`.\n\n            If not provided, the handoff tools will be named `transfer_to_agent_name`.\n        add_handoff_back_messages: Whether to add a pair of `(AIMessage, ToolMessage)` to the message history\n            when returning control to the supervisor to indicate that a handoff has occurred.\n        supervisor_name: Name of the supervisor node.\n        include_agent_name: Use to specify how to expose the agent name to the underlying supervisor LLM.\n\n            - `None`: Relies on the LLM provider using the name attribute on the AI message. Currently, only OpenAI supports this.\n            - `'inline'`: Add the agent name directly into the content field of the AI message using XML-style tags.\n\n                Example: `\"How can I help you\"` -> `\"<name>agent_name</name><content>How can I help you?</content>\"`\n\n    Example:\n        ```python\n        from langchain_openai import ChatOpenAI\n\n        from langgraph_supervisor import create_supervisor\n        from langgraph.prebuilt import create_react_agent\n\n        # Create specialized agents\n\n        def add(a: float, b: float) -> float:\n            '''Add two numbers.'''\n            return a + b\n\n        def web_search(query: str) -> str:\n            '''Search the web for information.'''\n            return 'Here are the headcounts for each of the FAANG companies in 2024...'\n\n        math_agent = create_react_agent(\n            model=\"openai:gpt-4o\",\n            tools=[add],\n            name=\"math_expert\",\n        )\n\n        research_agent = create_react_agent(\n            model=\"openai:gpt-4o\",\n            tools=[web_search],\n            name=\"research_expert\",\n        )\n\n        # Create supervisor workflow\n        workflow = create_supervisor(\n            [research_agent, math_agent],\n            model=ChatOpenAI(model=\"gpt-4o\"),\n        )\n\n        # Compile and run\n        app = workflow.compile()\n        result = app.invoke({\n            \"messages\": [\n                {\n                    \"role\": \"user\",\n                    \"content\": \"what's the combined headcount of the FAANG companies in 2024?\"\n                }\n            ]\n        })\n        ```\n    \"\"\"\n    if (config_schema := deprecated_kwargs.get(\"config_schema\", None)) is not None:\n        warn(\n            \"`config_schema` is deprecated. 
Please use `context_schema` instead.\",\n            DeprecationWarning,\n            stacklevel=2,\n        )\n        context_schema = config_schema\n\n    if add_handoff_back_messages is None:\n        add_handoff_back_messages = add_handoff_messages\n\n    supervisor_schema = state_schema or (\n        AgentStateWithStructuredResponse if response_format is not None else AgentState  # type: ignore[deprecated]\n    )\n    workflow_schema = state_schema or _OuterState\n\n    agent_names = set()\n    for agent in agents:\n        if agent.name is None or agent.name == \"LangGraph\":\n            raise ValueError(\n                \"Please specify a name when you create your agent, either via `create_react_agent(..., name=agent_name)` \"\n                \"or via `graph.compile(name=name)`.\"\n            )\n\n        if agent.name in agent_names:\n            raise ValueError(\n                f\"Agent with name '{agent.name}' already exists. Agent names must be unique.\"\n            )\n\n        agent_names.add(agent.name)\n\n    tool_node = _prepare_tool_node(\n        tools,\n        handoff_tool_prefix,\n        add_handoff_messages,\n        agent_names,\n    )\n    all_tools = list(tool_node.tools_by_name.values())\n\n    if _should_bind_tools(model, all_tools):\n        if _supports_disable_parallel_tool_calls(model):\n            model = cast(BaseChatModel, model).bind_tools(\n                all_tools, parallel_tool_calls=parallel_tool_calls\n            )\n        else:\n            model = cast(BaseChatModel, model).bind_tools(all_tools)\n\n    if include_agent_name:\n        model = with_agent_name(model, include_agent_name)\n\n    supervisor_agent = create_react_agent(  # type: ignore[deprecated]\n        name=supervisor_name,\n        model=model,\n        tools=tool_node,\n        prompt=prompt,\n        state_schema=supervisor_schema,\n        response_format=response_format,\n        pre_model_hook=pre_model_hook,\n        post_model_hook=post_model_hook,\n    )\n\n    builder = StateGraph(cast(Type[Any], workflow_schema), context_schema=context_schema)\n    builder.add_node(supervisor_agent, destinations=tuple(agent_names) + (END,))\n    builder.add_edge(START, supervisor_agent.name)\n    for agent in agents:\n        builder.add_node(\n            agent.name,\n            _make_call_agent(\n                agent,\n                output_mode,\n                add_handoff_back_messages=add_handoff_back_messages,\n                supervisor_name=supervisor_name,\n            ),\n        )\n        builder.add_edge(agent.name, supervisor_agent.name)\n\n    return builder\n"
  },
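  {
    "path": "examples/structured_response_sketch.py",
    "content": "\"\"\"Hedged sketch, not shipped with this package: using `response_format` with\n`create_supervisor`, per the docstring. The schema, prompt, and model choice are\nillustrative assumptions; a chat model supporting `.with_structured_output` is required.\n\"\"\"\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic import BaseModel\n\nfrom langgraph.prebuilt import create_react_agent\nfrom langgraph_supervisor import create_supervisor\n\n\nclass Headcount(BaseModel):\n    \"\"\"Illustrative schema for the final structured answer.\"\"\"\n\n    total_employees: int\n\n\ndef add(a: float, b: float) -> float:\n    \"\"\"Add two numbers.\"\"\"\n    return a + b\n\n\nmath_agent = create_react_agent(model=\"openai:gpt-4o\", tools=[add], name=\"math_expert\")\n\n# A bare schema is accepted too; the tuple form pairs a prompt with the schema,\n# and the prompt is used together with the model to generate the structured response.\nworkflow = create_supervisor(\n    [math_agent],\n    model=ChatOpenAI(model=\"gpt-4o\"),\n    response_format=(\"Extract the final combined headcount.\", Headcount),\n)\napp = workflow.compile()\n\n# After `app.invoke(...)`, the output state is expected to contain a\n# `structured_response` key holding a `Headcount` instance.\n"
  },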
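  {
    "path": "examples/pre_model_hook_sketch.py",
    "content": "\"\"\"Hedged sketch, not shipped with this package: a `pre_model_hook` for\n`create_supervisor` that trims long histories, following the contract in the\ndocstring. The keep-last-N policy and the MAX_MESSAGES value are assumptions;\nthe `REMOVE_ALL_MESSAGES` constant is imported from `langgraph.graph.message`.\n\"\"\"\n\nfrom typing import Any\n\nfrom langchain_core.messages import RemoveMessage\nfrom langgraph.graph.message import REMOVE_ALL_MESSAGES\n\nMAX_MESSAGES = 20  # illustrative budget\n\n\ndef trim_history(state: dict[str, Any]) -> dict[str, Any]:\n    \"\"\"Overwrite `messages` with the most recent MAX_MESSAGES entries.\n\n    Per the docstring warning, a hook that returns `messages` must OVERWRITE\n    the key, hence the leading `RemoveMessage(id=REMOVE_ALL_MESSAGES)`.\n    \"\"\"\n    kept = state[\"messages\"][-MAX_MESSAGES:]\n    return {\"messages\": [RemoveMessage(id=REMOVE_ALL_MESSAGES), *kept]}\n\n\ndef trim_llm_input(state: dict[str, Any]) -> dict[str, Any]:\n    \"\"\"Alternative: shape only the LLM input, leaving `messages` untouched.\"\"\"\n    return {\"llm_input_messages\": state[\"messages\"][-MAX_MESSAGES:]}\n\n\n# Usage: create_supervisor(agents, model=model, pre_model_hook=trim_history)\n"
  },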
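  {
    "path": "examples/inline_agent_name_sketch.py",
    "content": "\"\"\"Hedged sketch, not shipped with this package: the message transformation\nbehind `include_agent_name='inline'`, shown via the helpers exercised in\n`tests/test_agent_name.py`.\n\"\"\"\n\nfrom langchain_core.messages import AIMessage\n\nfrom langgraph_supervisor.agent_name import (\n    add_inline_agent_name,\n    remove_inline_agent_name,\n)\n\nmsg = AIMessage(content=\"How can I help you?\", name=\"math_expert\")\n\n# The agent name is embedded in the content with XML-style tags.\ntagged = add_inline_agent_name(msg)\nassert tagged.content == \"<name>math_expert</name><content>How can I help you?</content>\"\n\n# `remove_inline_agent_name` inverts the transformation.\nassert remove_inline_agent_name(tagged).content == \"How can I help you?\"\n"
  },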
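  {
    "path": "examples/handoff_configuration_sketch.py",
    "content": "\"\"\"Hedged sketch, not shipped with this package: the handoff-related knobs of\n`create_supervisor` described in the docstring. The agent and model are\nplaceholders borrowed from the docstring example.\n\"\"\"\n\nfrom langchain_openai import ChatOpenAI\n\nfrom langgraph.prebuilt import create_react_agent\nfrom langgraph_supervisor import create_supervisor\n\n\ndef web_search(query: str) -> str:\n    \"\"\"Search the web for information.\"\"\"\n    return \"...\"\n\n\nresearch_agent = create_react_agent(\n    model=\"openai:gpt-4o\", tools=[web_search], name=\"research_expert\"\n)\n\nworkflow = create_supervisor(\n    [research_agent],\n    model=ChatOpenAI(model=\"gpt-4o\"),\n    # Handoff tool is named with the prefix (here `delegate_to_research_expert`)\n    # instead of the default `transfer_to_research_expert`.\n    handoff_tool_prefix=\"delegate_to_\",\n    # Keep only each agent's final message in the shared history.\n    output_mode=\"last_message\",\n    # Hide the `(AIMessage, ToolMessage)` handoff pair from the history.\n    add_handoff_messages=False,\n    # OpenAI/Anthropic only: disallow handing off to multiple agents at once.\n    parallel_tool_calls=False,\n)\napp = workflow.compile()\n"
  },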
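  {
    "path": "examples/forward_message_sketch.py",
    "content": "\"\"\"Hedged sketch, not shipped with this package: letting the supervisor\nforward a worker's last message verbatim via `create_forward_message_tool`,\nas exercised in `tests/test_supervisor.py`. The agent and model are placeholders.\n\"\"\"\n\nfrom langchain_openai import ChatOpenAI\n\nfrom langgraph.prebuilt import create_react_agent\nfrom langgraph_supervisor import create_supervisor\nfrom langgraph_supervisor.handoff import create_forward_message_tool\n\n\ndef web_search(query: str) -> str:\n    \"\"\"Search the web for information.\"\"\"\n    return \"...\"\n\n\nresearch_agent = create_react_agent(\n    model=\"openai:gpt-4o\", tools=[web_search], name=\"research_expert\"\n)\n\n# The argument is the supervisor's name; the resulting `forward_message` tool\n# lets the supervisor re-emit an agent's response as its own final message.\nforwarding = create_forward_message_tool(\"supervisor\")\n\nworkflow = create_supervisor(\n    [research_agent],\n    model=ChatOpenAI(model=\"gpt-4o\"),\n    tools=[forwarding],\n)\napp = workflow.compile()\n"
  },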
  {
    "path": "pyproject.toml",
    "content": "[build-system]\nrequires = [\"pdm-backend\"]\nbuild-backend = \"pdm.backend\"\n\n[project]\nname = \"langgraph-supervisor\"\nversion = \"0.0.31\"\ndescription = \"An implementation of a supervisor multi-agent architecture using LangGraph\"\nauthors = [\n    {name = \"Vadym Barda\", email = \"19161700+vbarda@users.noreply.github.com \"}\n]\nlicense = \"MIT\"\nlicense-files = [\"LICENSE\"]\nreadme = \"README.md\"\nrequires-python = \">=3.10\"\ndependencies = [\n    \"langgraph>=1.0.2,<2.0.0\",\n    \"langchain-core>=1.0.0,<2.0.0\"\n]\n\n[project.urls]\nSource = \"https://github.com/langchain-ai/langgraph-supervisor-py\"\nChangelog = \"https://github.com/langchain-ai/langgraph-supervisor-py/releases\"\nTwitter = \"https://x.com/LangChainAI\"\nSlack = \"https://www.langchain.com/join-community\"\nReddit = \"https://www.reddit.com/r/LangChain/\"\n\n[dependency-groups]\ntest = [\n    \"pytest>=8.0.0\",\n    \"ruff>=0.9.4\",\n    \"mypy>=1.8.0\",\n    \"pytest-socket>=0.7.0\",\n    \"types-setuptools>=69.0.0\",\n]\n\n[tool.pytest.ini_options]\nminversion = \"8.0\"\naddopts = \"-ra -q -v\"\ntestpaths = [\n    \"tests\",\n]\npython_files = [\"test_*.py\"]\npython_functions = [\"test_*\"]\n\n[tool.ruff]\nline-length = 100\ntarget-version = \"py310\"\n\n[tool.ruff.lint]\nselect = [\n    \"E\",  # pycodestyle errors\n    \"W\",  # pycodestyle warnings\n    \"F\",  # pyflakes\n    \"I\",  # isort\n    \"B\",  # flake8-bugbear\n]\nignore = [\n  \"E501\" # line-length\n]\n\n\n[tool.mypy]\npython_version = \"3.11\"\nwarn_return_any = true\nwarn_unused_configs = true\ndisallow_untyped_defs = true\ncheck_untyped_defs = true\n\n[tool.ty.rules]\nno-matching-overload = \"ignore\" \ncall-non-callable = \"ignore\"\nunresolved-import = \"ignore\"\n[tool.ty.src]\nexclude = [\"tests\"]\n"
  },
  {
    "path": "tests/__init__.py",
    "content": ""
  },
  {
    "path": "tests/test_agent_name.py",
    "content": "from langchain_core.messages import AIMessage, HumanMessage\n\nfrom langgraph_supervisor.agent_name import (\n    add_inline_agent_name,\n    remove_inline_agent_name,\n)\n\n\ndef test_add_inline_agent_name() -> None:\n    # Test that non-AI messages are returned unchanged.\n    human_message = HumanMessage(content=\"Hello\")\n    result = add_inline_agent_name(human_message)\n    assert result == human_message\n\n    # Test that AI messages with no name are returned unchanged.\n    ai_message = AIMessage(content=\"Hello world\")\n    result = add_inline_agent_name(ai_message)\n    assert result == ai_message\n\n    # Test that AI messages get formatted with name and content tags.\n    ai_message = AIMessage(content=\"Hello world\", name=\"assistant\")\n    result = add_inline_agent_name(ai_message)\n    assert result.content == \"<name>assistant</name><content>Hello world</content>\"\n    assert result.name == \"assistant\"\n\n\ndef test_add_inline_agent_name_content_blocks() -> None:\n    content_blocks: list[str | dict] = [\n        {\"type\": \"text\", \"text\": \"Hello world\"},\n        {\"type\": \"image\", \"image_url\": \"http://example.com/image.jpg\"},\n    ]\n    ai_message = AIMessage(content=content_blocks, name=\"assistant\")\n    result = add_inline_agent_name(ai_message)\n    assert result.content == [\n        {\"type\": \"text\", \"text\": \"<name>assistant</name><content>Hello world</content>\"},\n        {\"type\": \"image\", \"image_url\": \"http://example.com/image.jpg\"},\n    ]\n\n    # Test that content blocks without text blocks are returned unchanged\n    content_blocks = [\n        {\"type\": \"image\", \"image_url\": \"http://example.com/image.jpg\"},\n        {\"type\": \"file\", \"file_url\": \"http://example.com/document.pdf\"},\n    ]\n    expected_content_blocks = [\n        {\"type\": \"text\", \"text\": \"<name>assistant</name><content></content>\"}\n    ] + content_blocks\n    ai_message = AIMessage(content=content_blocks, name=\"assistant\")\n    result = add_inline_agent_name(ai_message)\n\n    # The message should be returned unchanged\n    assert result.content == expected_content_blocks\n\n\ndef test_remove_inline_agent_name() -> None:\n    # Test that non-AI messages are returned unchanged.\n    human_message = HumanMessage(content=\"Hello\")\n    result = remove_inline_agent_name(human_message)\n    assert result == human_message\n\n    # Test that messages with empty content are returned unchanged.\n    ai_message = AIMessage(content=\"\", name=\"assistant\")\n    result = remove_inline_agent_name(ai_message)\n    assert result == ai_message\n\n    # Test that messages without name/content tags are returned unchanged.\n    ai_message = AIMessage(content=\"Hello world\", name=\"assistant\")\n    result = remove_inline_agent_name(ai_message)\n    assert result == ai_message\n\n    # Test that content is correctly extracted from tags.\n    ai_message = AIMessage(\n        content=\"<name>assistant</name><content>Hello world</content>\", name=\"assistant\"\n    )\n    result = remove_inline_agent_name(ai_message)\n    assert result.content == \"Hello world\"\n    assert result.name == \"assistant\"\n\n\ndef test_remove_inline_agent_name_content_blocks() -> None:\n    content_blocks: list[str | dict] = [\n        {\"type\": \"text\", \"text\": \"<name>assistant</name><content>Hello world</content>\"},\n        {\"type\": \"image\", \"image_url\": \"http://example.com/image.jpg\"},\n    ]\n    ai_message = 
AIMessage(content=content_blocks, name=\"assistant\")\n    result = remove_inline_agent_name(ai_message)\n\n    expected_content = [\n        {\"type\": \"text\", \"text\": \"Hello world\"},\n        {\"type\": \"image\", \"image_url\": \"http://example.com/image.jpg\"},\n    ]\n    assert result.content == expected_content\n    assert result.name == \"assistant\"\n\n    # Test that a text block containing only the name tags (empty content) is removed.\n    content_blocks = [\n        {\"type\": \"text\", \"text\": \"<name>assistant</name><content></content>\"},\n        {\"type\": \"image\", \"image_url\": \"http://example.com/image.jpg\"},\n        {\"type\": \"file\", \"file_url\": \"http://example.com/document.pdf\"},\n    ]\n    expected_content_blocks = content_blocks[1:]\n    ai_message = AIMessage(content=content_blocks, name=\"assistant\")\n    result = remove_inline_agent_name(ai_message)\n    assert result.content == expected_content_blocks\n\n\ndef test_remove_inline_agent_name_multiline_content() -> None:\n    multiline_content = \"\"\"<name>assistant</name><content>This is\na multiline\nmessage</content>\"\"\"\n    ai_message = AIMessage(content=multiline_content, name=\"assistant\")\n    result = remove_inline_agent_name(ai_message)\n    assert result.content == \"This is\\na multiline\\nmessage\"\n"
  },
  {
    "path": "tests/test_supervisor.py",
    "content": "\"\"\"Tests for the supervisor module.\"\"\"\n# mypy: ignore-errors\n\nfrom collections.abc import Callable, Sequence\nfrom typing import Any, Optional, cast\n\nimport pytest\nfrom langchain_core.callbacks.manager import CallbackManagerForLLMRun\nfrom langchain_core.language_models.chat_models import BaseChatModel, LanguageModelInput\nfrom langchain_core.messages import AIMessage, BaseMessage, HumanMessage\nfrom langchain_core.outputs import ChatGeneration, ChatResult\nfrom langchain_core.runnables import Runnable, RunnableConfig\nfrom langchain_core.tools import BaseTool, tool\nfrom langgraph.graph import MessagesState, StateGraph\nfrom langgraph.prebuilt import create_react_agent\n\nfrom langgraph_supervisor import create_supervisor\nfrom langgraph_supervisor.agent_name import AgentNameMode, with_agent_name\nfrom langgraph_supervisor.handoff import create_forward_message_tool\n\n\nclass FakeChatModel(BaseChatModel):\n    idx: int = 0\n    responses: Sequence[BaseMessage]\n\n    @property\n    def _llm_type(self) -> str:\n        return \"fake-tool-call-model\"\n\n    def _generate(\n        self,\n        messages: list[BaseMessage],\n        stop: Optional[list[str]] = None,\n        run_manager: Optional[CallbackManagerForLLMRun] = None,\n        **kwargs: dict[str, Any],\n    ) -> ChatResult:\n        generation = ChatGeneration(message=self.responses[self.idx])\n        self.idx += 1\n        return ChatResult(generations=[generation])\n\n    def bind_tools(\n        self, tools: Sequence[dict[str, Any] | type | Callable | BaseTool], **kwargs: Any\n    ) -> Runnable[LanguageModelInput, BaseMessage]:\n        tool_dicts = [\n            {\n                \"name\": tool.name if isinstance(tool, BaseTool) else str(tool),\n            }\n            for tool in tools\n        ]\n        return self.bind(tools=tool_dicts)\n\n\nsupervisor_messages = [\n    AIMessage(\n        content=\"\",\n        tool_calls=[\n            {\n                \"name\": \"transfer_to_research_expert\",\n                \"args\": {},\n                \"id\": \"call_gyQSgJQm5jJtPcF5ITe8GGGF\",\n                \"type\": \"tool_call\",\n            }\n        ],\n    ),\n    AIMessage(\n        content=\"\",\n        tool_calls=[\n            {\n                \"name\": \"transfer_to_math_expert\",\n                \"args\": {},\n                \"id\": \"call_zCExWE54g4B4oFZcwBh3Wumg\",\n                \"type\": \"tool_call\",\n            }\n        ],\n    ),\n    AIMessage(\n        content=\"The combined headcount of the FAANG companies in 2024 is 1,977,586 employees.\",\n    ),\n]\n\nresearch_agent_messages = [\n    AIMessage(\n        content=\"\",\n        tool_calls=[\n            {\n                \"name\": \"web_search\",\n                \"args\": {\"query\": \"FAANG headcount 2024\"},\n                \"id\": \"call_4sLYp7usFcIZBFcNsOGQiFzV\",\n                \"type\": \"tool_call\",\n            },\n        ],\n    ),\n    AIMessage(\n        content=\"The headcount for the FAANG companies in 2024 is as follows:\\n\\n1. **Facebook (Meta)**: 67,317 employees\\n2. **Amazon**: 1,551,000 employees\\n3. **Apple**: 164,000 employees\\n4. **Netflix**: 14,000 employees\\n5. 
**Google (Alphabet)**: 181,269 employees\\n\\nTo find the combined headcount, simply add these numbers together.\",\n    ),\n]\n\nmath_agent_messages = [\n    AIMessage(\n        content=\"\",\n        tool_calls=[\n            {\n                \"name\": \"add\",\n                \"args\": {\"a\": 67317, \"b\": 1551000},\n                \"id\": \"call_BRvA6oAlgMA1whIkAn9gE3AS\",\n                \"type\": \"tool_call\",\n            },\n            {\n                \"name\": \"add\",\n                \"args\": {\"a\": 164000, \"b\": 14000},\n                \"id\": \"call_OLVb4v0pNDlsBsKBwDK4wb1W\",\n                \"type\": \"tool_call\",\n            },\n            {\n                \"name\": \"add\",\n                \"args\": {\"a\": 181269, \"b\": 0},\n                \"id\": \"call_5VEHaInDusJ9MU3i3tVJN6Hr\",\n                \"type\": \"tool_call\",\n            },\n        ],\n    ),\n    AIMessage(\n        content=\"\",\n        tool_calls=[\n            {\n                \"name\": \"add\",\n                \"args\": {\"a\": 1618317, \"b\": 178000},\n                \"id\": \"call_FdfUz8Gm3S5OQaVq2oQpMxeN\",\n                \"type\": \"tool_call\",\n            },\n            {\n                \"name\": \"add\",\n                \"args\": {\"a\": 181269, \"b\": 0},\n                \"id\": \"call_j5nna1KwGiI60wnVHM2319r6\",\n                \"type\": \"tool_call\",\n            },\n        ],\n    ),\n    AIMessage(\n        content=\"\",\n        tool_calls=[\n            {\n                \"name\": \"add\",\n                \"args\": {\"a\": 1796317, \"b\": 181269},\n                \"id\": \"call_4fNHtFvfOvsaSPb8YK1qNAiR\",\n                \"type\": \"tool_call\",\n            }\n        ],\n    ),\n    AIMessage(\n        content=\"The combined headcount of the FAANG companies in 2024 is 1,977,586 employees.\",\n    ),\n]\n\n\n@pytest.mark.parametrize(\n    \"include_agent_name,include_individual_agent_name\",\n    [\n        (None, None),\n        (None, \"inline\"),\n        (\"inline\", None),\n        (\"inline\", \"inline\"),\n    ],\n)\ndef test_supervisor_basic_workflow(\n    include_agent_name: AgentNameMode | None,\n    include_individual_agent_name: AgentNameMode | None,\n) -> None:\n    \"\"\"Test basic supervisor workflow with two agents.\"\"\"\n\n    # output_mode = \"last_message\"\n    @tool\n    def add(a: float, b: float) -> float:\n        \"\"\"Add two numbers.\"\"\"\n        return a + b\n\n    @tool\n    def web_search(query: str) -> str:\n        \"\"\"Search the web for information.\"\"\"\n        return (\n            \"Here are the headcounts for each of the FAANG companies in 2024:\\n\"\n            \"1. **Facebook (Meta)**: 67,317 employees.\\n\"\n            \"2. **Apple**: 164,000 employees.\\n\"\n            \"3. **Amazon**: 1,551,000 employees.\\n\"\n            \"4. **Netflix**: 14,000 employees.\\n\"\n            \"5. 
**Google (Alphabet)**: 181,269 employees.\"\n        )\n\n    math_model: FakeChatModel = FakeChatModel(responses=math_agent_messages)\n    if include_individual_agent_name:\n        math_model = cast(\n            FakeChatModel,\n            with_agent_name(math_model.bind_tools([add]), include_individual_agent_name),\n        )\n\n    math_agent = create_react_agent(\n        model=math_model,\n        tools=[add],\n        name=\"math_expert\",\n    )\n\n    research_model = FakeChatModel(responses=research_agent_messages)\n    if include_individual_agent_name:\n        research_model = cast(\n            FakeChatModel,\n            with_agent_name(research_model.bind_tools([web_search]), include_individual_agent_name),\n        )\n\n    research_agent = create_react_agent(\n        model=research_model,\n        tools=[web_search],\n        name=\"research_expert\",\n    )\n\n    workflow = create_supervisor(\n        [math_agent, research_agent],\n        model=FakeChatModel(responses=supervisor_messages),\n        include_agent_name=include_agent_name,\n    )\n\n    app = workflow.compile()\n    assert app is not None\n\n    result = app.invoke(\n        {\n            \"messages\": [\n                HumanMessage(\n                    content=\"what's the combined headcount of the FAANG companies in 2024?\"\n                )\n            ]\n        }\n    )\n\n    assert len(result[\"messages\"]) == 12\n    # first supervisor handoff\n    assert result[\"messages\"][1] == supervisor_messages[0]\n    # last research agent message\n    assert result[\"messages\"][3] == research_agent_messages[-1]\n    # next supervisor handoff\n    assert result[\"messages\"][6] == supervisor_messages[1]\n    # last math agent message\n    assert result[\"messages\"][8] == math_agent_messages[-1]\n    # final supervisor message\n    assert result[\"messages\"][11] == supervisor_messages[-1]\n\n    # output_mode = \"full_history\"\n    math_agent = create_react_agent(\n        model=FakeChatModel(responses=math_agent_messages),\n        tools=[add],\n        name=\"math_expert\",\n    )\n\n    research_agent = create_react_agent(\n        model=FakeChatModel(responses=research_agent_messages),\n        tools=[web_search],\n        name=\"research_expert\",\n    )\n\n    workflow_full_history = create_supervisor(\n        [math_agent, research_agent],\n        model=FakeChatModel(responses=supervisor_messages),\n        output_mode=\"full_history\",\n    )\n    app_full_history = workflow_full_history.compile()\n    result_full_history = app_full_history.invoke(\n        {\n            \"messages\": [\n                HumanMessage(\n                    content=\"what's the combined headcount of the FAANG companies in 2024?\"\n                )\n            ]\n        }\n    )\n\n    assert len(result_full_history[\"messages\"]) == 23\n    # first supervisor handoff\n    assert result_full_history[\"messages\"][1] == supervisor_messages[0]\n    # all research agent AI messages\n    assert result_full_history[\"messages\"][3] == research_agent_messages[0]\n    assert result_full_history[\"messages\"][5] == research_agent_messages[1]\n    # next supervisor handoff\n    assert result_full_history[\"messages\"][8] == supervisor_messages[1]\n    # all math agent AI messages\n    assert result_full_history[\"messages\"][10] == math_agent_messages[0]\n    assert result_full_history[\"messages\"][14] == math_agent_messages[1]\n    assert result_full_history[\"messages\"][17] == math_agent_messages[2]\n    # 
final supervisor message\n    assert result_full_history[\"messages\"][-1] == supervisor_messages[-1]\n\n\nclass FakeChatModelWithAssertion(FakeChatModel):\n    assertion: Callable[[list[BaseMessage]], None]\n\n    def _generate(\n        self,\n        messages: list[BaseMessage],\n        stop: Optional[list[str]] = None,\n        run_manager: Optional[CallbackManagerForLLMRun] = None,\n        **kwargs: dict[str, Any],\n    ) -> ChatResult:\n        self.assertion(messages)\n        return super()._generate(messages, stop, run_manager, **kwargs)\n\n\ndef get_tool_calls(msg: BaseMessage) -> list[dict[str, Any]] | None:\n    tool_calls = getattr(msg, \"tool_calls\", None)\n    if tool_calls is None:\n        return None\n    return [\n        {\"name\": tc[\"name\"], \"args\": tc[\"args\"]} for tc in tool_calls if tc[\"type\"] == \"tool_call\"\n    ]\n\n\ndef as_dict(msg: BaseMessage) -> dict[str, Any]:\n    return {\n        \"name\": msg.name,\n        \"content\": msg.content,\n        \"tool_calls\": get_tool_calls(msg),\n        \"type\": msg.type,\n    }\n\n\nclass Expectations:\n    def __init__(self, expected: list[list[dict[str, Any]]]) -> None:\n        self.expected = expected.copy()\n\n    def __call__(self, messages: list[BaseMessage]) -> None:\n        expected = self.expected.pop(0)\n        received = [as_dict(m) for m in messages]\n        assert expected == received\n\n\ndef test_worker_hide_handoffs() -> None:\n    \"\"\"Test that handoff messages are hidden from the message history when `add_handoff_messages=False`.\"\"\"\n\n    @tool\n    def echo_tool(text: str) -> str:\n        \"\"\"Echo the input text.\"\"\"\n        return text\n\n    expectations: list[list[dict[str, Any]]] = [\n        [\n            {\n                \"name\": None,\n                \"content\": \"Scooby-dooby-doo\",\n                \"tool_calls\": None,\n                \"type\": \"human\",\n            }\n        ],\n        [\n            {\n                \"name\": None,\n                \"content\": \"Scooby-dooby-doo\",\n                \"tool_calls\": None,\n                \"type\": \"human\",\n            },\n            {\n                \"name\": \"echo_agent\",\n                \"content\": \"Echo 1!\",\n                \"tool_calls\": [],\n                \"type\": \"ai\",\n            },\n            {\"name\": \"supervisor\", \"content\": \"boo\", \"tool_calls\": [], \"type\": \"ai\"},\n            {\n                \"name\": None,\n                \"content\": \"Huh take two?\",\n                \"tool_calls\": None,\n                \"type\": \"human\",\n            },\n        ],\n    ]\n\n    echo_model = FakeChatModelWithAssertion(\n        responses=[\n            AIMessage(content=\"Echo 1!\"),\n            AIMessage(content=\"Echo 2!\"),\n        ],\n        assertion=Expectations(expectations),\n    )\n    echo_agent = create_react_agent(\n        model=echo_model.bind_tools([echo_tool]),\n        tools=[echo_tool],\n        name=\"echo_agent\",\n    )\n\n    supervisor_messages = [\n        AIMessage(\n            content=\"\",\n            tool_calls=[\n                {\n                    \"name\": \"transfer_to_echo_agent\",\n                    \"args\": {},\n                    \"id\": \"call_gyQSgJQm5jJtPcF5ITe8GGGF\",\n                    \"type\": \"tool_call\",\n                }\n            ],\n        ),\n        AIMessage(\n            content=\"boo\",\n        ),\n        AIMessage(\n            content=\"\",\n            tool_calls=[\n            
    {\n                    \"name\": \"transfer_to_echo_agent\",\n                    \"args\": {},\n                    \"id\": \"call_gyQSgJQm5jJtPcF5ITe8GGGG\",\n                    \"type\": \"tool_call\",\n                }\n            ],\n        ),\n        AIMessage(\n            content=\"END\",\n        ),\n    ]\n\n    workflow = create_supervisor(\n        [echo_agent],\n        model=FakeChatModel(responses=supervisor_messages),\n        add_handoff_messages=False,\n    )\n    app = workflow.compile()\n\n    result = app.invoke({\"messages\": [HumanMessage(content=\"Scooby-dooby-doo\")]})\n    app.invoke({\"messages\": result[\"messages\"] + [HumanMessage(content=\"Huh take two?\")]})\n\n\ndef test_supervisor_message_forwarding() -> None:\n    \"\"\"Test that the supervisor forwards a message to a specific agent and receives the correct response.\"\"\"\n\n    @tool\n    def echo_tool(text: str) -> str:\n        \"\"\"Echo the input text.\"\"\"\n        return text\n\n    # Agent that simply echoes the message\n    echo_model = FakeChatModel(\n        responses=[\n            AIMessage(content=\"Echo: test forwarding!\"),\n        ]\n    )\n    echo_agent = create_react_agent(\n        model=echo_model.bind_tools([echo_tool]),\n        tools=[echo_tool],\n        name=\"echo_agent\",\n    )\n\n    supervisor_messages = [\n        AIMessage(\n            content=\"\",\n            tool_calls=[\n                {\n                    \"name\": \"transfer_to_echo_agent\",\n                    \"args\": {},\n                    \"id\": \"call_gyQSgJQm5jJtPcF5ITe8GGGF\",\n                    \"type\": \"tool_call\",\n                }\n            ],\n        ),\n        AIMessage(\n            content=\"\",\n            tool_calls=[\n                {\n                    \"name\": \"forward_message\",\n                    \"args\": {\"from_agent\": \"echo_agent\"},\n                    \"id\": \"abcd123\",\n                    \"type\": \"tool_call\",\n                }\n            ],\n        ),\n    ]\n\n    forwarding = create_forward_message_tool(\"supervisor\")\n    workflow = create_supervisor(\n        [echo_agent],\n        model=FakeChatModel(responses=supervisor_messages),\n        tools=[forwarding],\n    )\n    app = workflow.compile()\n\n    result = app.invoke({\"messages\": [HumanMessage(content=\"Scooby-dooby-doo\")]})\n\n    # Normalize messages with the module-level `as_dict` helper.\n    received = [as_dict(msg) for msg in result[\"messages\"]]\n\n    expected = [\n        {\n            \"name\": None,\n            \"content\": \"Scooby-dooby-doo\",\n            \"tool_calls\": None,\n            \"type\": \"human\",\n        },\n        {\n            \"name\": \"supervisor\",\n            \"content\": \"\",\n            \"tool_calls\": [\n                {\n                    \"name\": \"transfer_to_echo_agent\",\n                    \"args\": {},\n                }\n            ],\n            \"type\": \"ai\",\n        },\n        {\n            \"name\": \"transfer_to_echo_agent\",\n     
       \"content\": \"Successfully transferred to echo_agent\",\n            \"tool_calls\": None,\n            \"type\": \"tool\",\n        },\n        {\n            \"name\": \"echo_agent\",\n            \"content\": \"Echo: test forwarding!\",\n            \"tool_calls\": [],\n            \"type\": \"ai\",\n        },\n        {\n            \"name\": \"echo_agent\",\n            \"content\": \"Transferring back to supervisor\",\n            \"tool_calls\": [\n                {\n                    \"name\": \"transfer_back_to_supervisor\",\n                    \"args\": {},\n                }\n            ],\n            \"type\": \"ai\",\n        },\n        {\n            \"name\": \"transfer_back_to_supervisor\",\n            \"content\": \"Successfully transferred back to supervisor\",\n            \"tool_calls\": None,\n            \"type\": \"tool\",\n        },\n        {\n            \"name\": \"supervisor\",\n            \"content\": \"Echo: test forwarding!\",\n            \"tool_calls\": [],\n            \"type\": \"ai\",\n        },\n    ]\n    assert received == expected\n\n\ndef test_metadata_passed_to_subagent() -> None:\n    \"\"\"Test that metadata from config is passed to sub-agents.\n\n    This test verifies that when a config object with metadata is passed to the supervisor,\n    the metadata is correctly passed to the sub-agent when it is invoked.\n    \"\"\"\n\n    # Create a tracking agent to verify metadata is passed\n    def test_node(_state: MessagesState, config: RunnableConfig) -> dict[str, list[BaseMessage]]:\n        # Assert that the metadata is passed to the sub-agent\n        assert config[\"metadata\"][\"test_key\"] == \"test_value\"\n        assert config[\"metadata\"][\"another_key\"] == 123\n        # Return a new message if the assertion passes.\n        return {\"messages\": [AIMessage(content=\"Test response\")]}\n\n    tracking_agent_workflow = StateGraph(MessagesState)\n    tracking_agent_workflow.add_node(\"test_node\", test_node)\n    tracking_agent_workflow.set_entry_point(\"test_node\")\n    tracking_agent_workflow.set_finish_point(\"test_node\")\n    tracking_agent = tracking_agent_workflow.compile()\n    tracking_agent.name = \"test_agent\"\n\n    # Create a supervisor with the tracking agent\n    supervisor_model = FakeChatModel(\n        responses=[\n            AIMessage(\n                content=\"\",\n                tool_calls=[\n                    {\n                        \"name\": \"transfer_to_test_agent\",\n                        \"args\": {},\n                        \"id\": \"call_123\",\n                        \"type\": \"tool_call\",\n                    }\n                ],\n            ),\n            AIMessage(content=\"Final response\"),\n        ]\n    )\n\n    supervisor = create_supervisor(\n        agents=[tracking_agent],\n        model=supervisor_model,\n    ).compile()\n\n    # Create config with metadata\n    test_metadata = {\"test_key\": \"test_value\", \"another_key\": 123}\n    config: RunnableConfig = {\"metadata\": test_metadata}\n\n    # Invoke the supervisor with the config\n    result = supervisor.invoke({\"messages\": [HumanMessage(content=\"Test message\")]}, config=config)\n    # Get the last message in the messages list & verify it matches the value\n    # returned from the node.\n    assert result[\"messages\"][-1].content == \"Final response\"\n"
  },
  {
    "path": "tests/test_supervisor_functional_api.py",
    "content": "\"\"\"Tests for the supervisor module using functional API.\"\"\"\n# mypy: ignore-errors\n\nfrom typing import Any, Dict, List\n\nfrom langchain_core.language_models.fake_chat_models import GenericFakeChatModel\nfrom langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage\nfrom langgraph.func import entrypoint, task\nfrom langgraph.graph import add_messages\n\nfrom langgraph_supervisor import create_supervisor\n\n\nclass FakeModel(GenericFakeChatModel):\n    def bind_tools(self, *args: tuple, **kwargs: Any) -> \"FakeModel\":\n        \"\"\"Do nothing for now.\"\"\"\n        return self\n\n\ndef test_supervisor_functional_workflow() -> None:\n    \"\"\"Test supervisor workflow with a functional API agent.\"\"\"\n    model = FakeModel(\n        messages=iter([AIMessage(content=\"Mocked response\")]),\n    )\n\n    # Create a joke agent using functional API\n    @task\n    def generate_joke(messages: List[BaseMessage]) -> BaseMessage:\n        \"\"\"Generate a joke using the model.\"\"\"\n        return model.invoke([SystemMessage(content=\"Write a short joke\")] + list(messages))\n\n    @entrypoint()\n    def joke_agent(state: Dict[str, Any]) -> Dict[str, Any]:\n        \"\"\"Joke agent entrypoint.\"\"\"\n        joke = generate_joke(state[\"messages\"]).result()\n        messages = add_messages(state[\"messages\"], joke)\n        return {\"messages\": messages}\n\n    # Set agent name\n    joke_agent.name = \"joke_agent\"\n\n    # Create supervisor workflow\n    workflow = create_supervisor(\n        [joke_agent], model=model, prompt=\"You are a supervisor managing a joke expert.\"\n    )\n\n    # Compile and test\n    app = workflow.compile()\n    assert app is not None\n\n    result = app.invoke({\"messages\": [HumanMessage(content=\"Tell me a joke!\")]})\n\n    # Verify results\n    assert \"messages\" in result\n    assert len(result[\"messages\"]) > 0\n    assert any(\"joke\" in msg.content.lower() for msg in result[\"messages\"])\n"
  }
]