[
  {
    "path": ".claude-plugin/marketplace.json",
    "content": "{\n  \"name\": \"exa-mcp-server\",\n  \"owner\": {\n    \"name\": \"Exa\",\n    \"email\": \"hello@exa.ai\"\n  },\n  \"metadata\": {\n    \"description\": \"Official Exa AI plugin providing web search, code search, company research, and deep research capabilities\",\n    \"version\": \"1.0.0\"\n  },\n  \"plugins\": [\n    {\n      \"name\": \"exa-mcp-server\",\n      \"source\": \"./\",\n      \"description\": \"A Model Context Protocol server with Exa for web search, code search, and web crawling. Provides real-time web searches with configurable tool selection, allowing users to enable or disable specific search capabilities.\",\n      \"version\": \"3.1.9\",\n      \"author\": {\n        \"name\": \"Exa\",\n        \"email\": \"hello@exa.ai\"\n      },\n      \"homepage\": \"https://docs.exa.ai/reference/exa-mcp\",\n      \"repository\": \"https://github.com/exa-labs/exa-mcp-server\",\n      \"license\": \"MIT\",\n      \"keywords\": [\n        \"mcp\",\n        \"search\",\n        \"web-search\",\n        \"code-search\",\n        \"exa\",\n        \"research\",\n        \"company-research\",\n        \"linkedin\",\n        \"crawling\"\n      ],\n      \"category\": \"productivity\",\n      \"strict\": false,\n      \"mcpServers\": {\n        \"exa\": {\n          \"type\": \"http\",\n          \"url\": \"https://mcp.exa.ai/mcp\",\n          \"headers\": {}\n        }\n      }\n    }\n  ]\n}\n\n"
  },
  {
    "path": ".gitignore",
    "content": "# Dependencies\nnode_modules/\n\n# Build outputs\nbuild/\ndist/\n.smithery/\n\n# Environment variables\n.env\n.env.local\n.env*.local\n\n# Logs\n*.log\nnpm-debug.log*\nyarn-debug.log*\nyarn-error.log*\n\n# OS\n.DS_Store\nThumbs.db\n\n# IDE\n.vscode/\n.idea/\n*.swp\n*.swo\n*~\n\n# Vercel\n.vercel\n\n# TypeScript\n*.tsbuildinfo\n"
  },
  {
    "path": ".npmignore",
    "content": "src/\ntests/\n.github/\n.gitignore\n.npmignore\ntsconfig.json\n*.log\n.env* "
  },
  {
    "path": ".vercelignore",
    "content": "# Smithery build artifacts\n.smithery/\n\n# Node modules\nnode_modules/\n\n# Build directories\nbuild/\n\n# Documentation\n*.md\n!README.md\n\n# Git\n.git/\n.gitignore\n\n# IDE\n.vscode/\n.idea/\n\n# Local environment\n.env.local\n.env*.local\n\n# Logs\n*.log\nnpm-debug.log*\nyarn-debug.log*\nyarn-error.log*\n\n# OS\n.DS_Store\nThumbs.db\n\n"
  },
  {
    "path": "Dockerfile",
    "content": "# Use the official Node.js 18 image as a parent image\nFROM node:18-alpine AS builder\n\n# Set the working directory in the container to /app\nWORKDIR /app\n\n# Copy package.json and package-lock.json into the container\nCOPY package.json package-lock.json ./\n\n# Install dependencies\nRUN npm ci --ignore-scripts\n\n# Copy the rest of the application code into the container\nCOPY src/ ./src/\nCOPY tsconfig.json ./\n\n# Build the project for Docker\nRUN npm run build\n\n# Use a minimal node image as the base image for running\nFROM node:18-alpine AS runner\n\nWORKDIR /app\n\n# Copy compiled code from the builder stage\nCOPY --from=builder /app/.smithery ./.smithery\nCOPY package.json package-lock.json ./\n\n# Install only production dependencies\nRUN npm ci --production --ignore-scripts\n\n# Set environment variable for the Exa API key\nENV EXA_API_KEY=your-api-key-here\n\n# Expose the port the app runs on\nEXPOSE 3000\n\n# Run the application\nENTRYPOINT [\"node\", \".smithery/index.cjs\"]"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2025 Exa Labs\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "# Exa MCP Server\n\n[![Install in Cursor](https://img.shields.io/badge/Install_in-Cursor-000000?style=flat-square&logoColor=white)](https://cursor.com/en/install-mcp?name=exa&config=eyJuYW1lIjoiZXhhIiwidHlwZSI6Imh0dHAiLCJ1cmwiOiJodHRwczovL21jcC5leGEuYWkvbWNwIn0=)\n[![Install in VS Code](https://img.shields.io/badge/Install_in-VS_Code-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://vscode.dev/redirect/mcp/install?name=exa&config=%7B%22type%22%3A%22http%22%2C%22url%22%3A%22https%3A%2F%2Fmcp.exa.ai%2Fmcp%22%7D)\n[![npm version](https://badge.fury.io/js/exa-mcp-server.svg)](https://www.npmjs.com/package/exa-mcp-server)\n\nConnect AI assistants to Exa's search capabilities: web search, code search, and company research.\n\n**[Full Documentation](https://docs.exa.ai/reference/exa-mcp)** | **[npm Package](https://www.npmjs.com/package/exa-mcp-server)** | **[Get Your Exa API Key](https://dashboard.exa.ai/api-keys)**\n\n## Installation\n\nConnect to Exa's hosted MCP server:\n\n```\nhttps://mcp.exa.ai/mcp\n```\n\n[Get your API key](https://dashboard.exa.ai/api-keys)\n\n<details>\n<summary><b>Cursor</b></summary>\n\nAdd to `~/.cursor/mcp.json`:\n\n```json\n{\n  \"mcpServers\": {\n    \"exa\": {\n      \"url\": \"https://mcp.exa.ai/mcp\"\n    }\n  }\n}\n```\n</details>\n\n<details>\n<summary><b>VS Code</b></summary>\n\nAdd to `.vscode/mcp.json`:\n\n```json\n{\n  \"servers\": {\n    \"exa\": {\n      \"type\": \"http\",\n      \"url\": \"https://mcp.exa.ai/mcp\"\n    }\n  }\n}\n```\n</details>\n\n<details>\n<summary><b>Claude Code</b></summary>\n\n```bash\nclaude mcp add --transport http exa https://mcp.exa.ai/mcp\n```\n</details>\n\n<details>\n<summary><b>Claude Desktop</b></summary>\n\nAdd to your config file:\n- **macOS:** `~/Library/Application Support/Claude/claude_desktop_config.json`\n- **Windows:** `%APPDATA%\\Claude\\claude_desktop_config.json`\n\n```json\n{\n  \"mcpServers\": {\n    \"exa\": {\n      \"command\": \"npx\",\n     
 \"args\": [\"-y\", \"mcp-remote\", \"https://mcp.exa.ai/mcp\"]\n    }\n  }\n}\n```\n</details>\n\n<details>\n<summary><b>Codex</b></summary>\n\n```bash\ncodex mcp add exa --url https://mcp.exa.ai/mcp\n```\n</details>\n\n<details>\n<summary><b>OpenCode</b></summary>\n\nAdd to your `opencode.json`:\n\n```json\n{\n  \"mcp\": {\n    \"exa\": {\n      \"type\": \"remote\",\n      \"url\": \"https://mcp.exa.ai/mcp\",\n      \"enabled\": true\n    }\n  }\n}\n```\n</details>\n\n<details>\n<summary><b>Antigravity</b></summary>\n\nOpen the MCP Store panel (from the \"...\" dropdown in the side panel), then add a custom server with:\n\n```\nhttps://mcp.exa.ai/mcp\n```\n</details>\n\n<details>\n<summary><b>Windsurf</b></summary>\n\nAdd to `~/.codeium/windsurf/mcp_config.json`:\n\n```json\n{\n  \"mcpServers\": {\n    \"exa\": {\n      \"serverUrl\": \"https://mcp.exa.ai/mcp\"\n    }\n  }\n}\n```\n</details>\n\n<details>\n<summary><b>Zed</b></summary>\n\nAdd to your Zed settings:\n\n```json\n{\n  \"context_servers\": {\n    \"exa\": {\n      \"url\": \"https://mcp.exa.ai/mcp\"\n    }\n  }\n}\n```\n</details>\n\n<details>\n<summary><b>Gemini CLI</b></summary>\n\nAdd to `~/.gemini/settings.json`:\n\n```json\n{\n  \"mcpServers\": {\n    \"exa\": {\n      \"httpUrl\": \"https://mcp.exa.ai/mcp\"\n    }\n  }\n}\n```\n</details>\n\n<details>\n<summary><b>v0 by Vercel</b></summary>\n\nIn v0, select **Prompt Tools** > **Add MCP** and enter:\n\n```\nhttps://mcp.exa.ai/mcp\n```\n</details>\n\n<details>\n<summary><b>Warp</b></summary>\n\nGo to **Settings** > **MCP Servers** > **Add MCP Server** and add:\n\n```json\n{\n  \"exa\": {\n    \"url\": \"https://mcp.exa.ai/mcp\"\n  }\n}\n```\n</details>\n\n<details>\n<summary><b>Kiro</b></summary>\n\nAdd to `~/.kiro/settings/mcp.json`:\n\n```json\n{\n  \"mcpServers\": {\n    \"exa\": {\n      \"url\": \"https://mcp.exa.ai/mcp\"\n    }\n  }\n}\n```\n</details>\n\n<details>\n<summary><b>Roo Code</b></summary>\n\nAdd to your Roo Code MCP 
config:\n\n```json\n{\n  \"mcpServers\": {\n    \"exa\": {\n      \"type\": \"streamable-http\",\n      \"url\": \"https://mcp.exa.ai/mcp\"\n    }\n  }\n}\n```\n</details>\n\n<details>\n<summary><b>Other Clients</b></summary>\n\nFor clients that support remote MCP:\n\n```json\n{\n  \"mcpServers\": {\n    \"exa\": {\n      \"url\": \"https://mcp.exa.ai/mcp\"\n    }\n  }\n}\n```\n\nFor clients that need mcp-remote:\n\n```json\n{\n  \"mcpServers\": {\n    \"exa\": {\n      \"command\": \"npx\",\n      \"args\": [\"-y\", \"mcp-remote\", \"https://mcp.exa.ai/mcp\"]\n    }\n  }\n}\n```\n</details>\n\n<details>\n<summary><b>Via npm Package</b></summary>\n\nUse the npm package with your API key. [Get your API key](https://dashboard.exa.ai/api-keys).\n\n```json\n{\n  \"mcpServers\": {\n    \"exa\": {\n      \"command\": \"npx\",\n      \"args\": [\"-y\", \"exa-mcp-server\"],\n      \"env\": {\n        \"EXA_API_KEY\": \"your_api_key\"\n      }\n    }\n  }\n}\n```\n</details>\n\n## Available Tools\n\n**Enabled by Default:**\n| Tool | Description |\n| ---- | ----------- |\n| `web_search_exa` | Search the web for any topic and get clean, ready-to-use content |\n| `get_code_context_exa` | Find code examples, documentation, and programming solutions from GitHub, Stack Overflow, and docs |\n\n**Off by Default:**\n| Tool | Description |\n| ---- | ----------- |\n| `web_search_advanced_exa` | Advanced web search with full control over filters, domains, dates, and content options |\n| `crawling_exa` | Get the full content of a specific webpage from a known URL |\n\nEnable additional tools with the `tools` parameter:\n\n```\nhttps://mcp.exa.ai/mcp?exaApiKey=YOUR_KEY&tools=web_search_exa,web_search_advanced_exa,get_code_context_exa,crawling_exa\n```\n\n## Agent Skills (Claude Skills)\n\nReady-to-use skills for Claude Code. Each skill teaches Claude how to use Exa search for a specific task. 
Copy the content inside a dropdown and paste it into Claude Code — it handles the rest.\n\n<details>\n<summary><b>Company Research</b></summary>\n\nCopy the content below and paste it into Claude Code. It will set up the MCP connection and skill for you.\n\n````\nStep 1: Install or update Exa MCP\n\nIf Exa MCP already exists in your MCP configuration, either uninstall it first and install the new one, or update your existing MCP config with this endpoint. Run this command in your terminal:\n\nclaude mcp add --transport http exa \"https://mcp.exa.ai/mcp?tools=web_search_advanced_exa\"\n\n\nStep 2: Add this Claude skill\n\n---\nname: company-research\ndescription: Company research using Exa search. Finds company info, competitors, news, tweets, financials, LinkedIn profiles, builds company lists. Use when researching companies, doing competitor analysis, market research, or building company lists.\ncontext: fork\n---\n\n# Company Research\n\n## Tool Restriction (Critical)\n\nONLY use `web_search_advanced_exa`. Do NOT use `web_search_exa` or any other Exa tools.\n\n## Token Isolation (Critical)\n\nNever run Exa searches in main context. Always spawn Task agents:\n- Agent runs Exa search internally\n- Agent processes results using LLM intelligence\n- Agent returns only distilled output (compact JSON or brief markdown)\n- Main context stays clean regardless of search volume\n\n## Dynamic Tuning\n\nNo hardcoded numResults. Tune to user intent:\n- User says \"a few\" → 10-20\n- User says \"comprehensive\" → 50-100\n- User specifies number → match it\n- Ambiguous? Ask: \"How many companies would you like?\"\n\n## Query Variation\n\nExa returns different results for different phrasings. 
For coverage:\n- Generate 2-3 query variations\n- Run in parallel\n- Merge and deduplicate\n\n## Categories\n\nUse appropriate Exa `category` depending on what you need:\n- `company` → homepages, rich metadata (headcount, location, funding, revenue)\n- `news` → press coverage, announcements\n- `tweet` → social presence, public commentary\n- `people` → LinkedIn profiles (public data)\n- No category (`type: \"auto\"`) → general web results, deep dives, broader context\n\nStart with `category: \"company\"` for discovery, then use other categories or no category with `livecrawl: \"fallback\"` for deeper research.\n\n### Category-Specific Filter Restrictions\n\nWhen using `category: \"company\"`, these parameters cause 400 errors:\n- `includeDomains` / `excludeDomains`\n- `startPublishedDate` / `endPublishedDate`\n- `startCrawlDate` / `endCrawlDate`\n\nWhen searching without a category (or with `news`), domain and date filters work fine.\n\n**Universal restriction:** `includeText` and `excludeText` only support **single-item arrays**. 
Multi-item arrays cause 400 errors across all categories.\n\n## LinkedIn\n\nPublic LinkedIn via Exa: `category: \"people\"`, no other filters.\nAuth-required LinkedIn → use Claude in Chrome browser fallback.\n\n## Browser Fallback\n\nAuto-fallback to Claude in Chrome when:\n- Exa returns insufficient results\n- Content is auth-gated\n- Dynamic pages need JavaScript\n\n## Examples\n\n### Discovery: find companies in a space\n```\nweb_search_advanced_exa {\n  \"query\": \"AI infrastructure startups San Francisco\",\n  \"category\": \"company\",\n  \"numResults\": 20,\n  \"type\": \"auto\"\n}\n```\n\n### Deep dive: research a specific company\n```\nweb_search_advanced_exa {\n  \"query\": \"Anthropic funding rounds valuation 2024\",\n  \"type\": \"deep\",\n  \"livecrawl\": \"fallback\",\n  \"numResults\": 10,\n  \"includeDomains\": [\"techcrunch.com\", \"crunchbase.com\", \"bloomberg.com\"]\n}\n```\n\n### News coverage\n```\nweb_search_advanced_exa {\n  \"query\": \"Anthropic AI safety\",\n  \"category\": \"news\",\n  \"numResults\": 15,\n  \"startPublishedDate\": \"2024-01-01\"\n}\n```\n\n### LinkedIn profiles\n```\nweb_search_advanced_exa {\n  \"query\": \"VP Engineering AI infrastructure\",\n  \"category\": \"people\",\n  \"numResults\": 20\n}\n```\n\n## Output Format\n\nReturn:\n1) Results (structured list; one company per row)\n2) Sources (URLs; 1-line relevance each)\n3) Notes (uncertainty/conflicts)\n\n\nStep 3: Ask User to Restart Claude Code\n\nYou should ask the user to restart Claude Code to have the config changes take effect.\n````\n\n</details>\n\n<details>\n<summary><b>Code Search</b></summary>\n\nCopy the content below and paste it into Claude Code. It will set up the MCP connection and skill for you.\n\n````\nStep 1: Install or update Exa MCP\n\nIf Exa MCP already exists in your MCP configuration, either uninstall it first and install the new one, or update your existing MCP config with this endpoint. 
Run this command in your terminal:\n\nclaude mcp add --transport http exa \"https://mcp.exa.ai/mcp?tools=get_code_context_exa\"\n\n\nStep 2: Add this Claude skill\n\n---\nname: get-code-context-exa\ndescription: Code context using Exa. Finds real snippets and docs from GitHub, StackOverflow, and technical docs. Use when searching for code examples, API syntax, library documentation, or debugging help.\ncontext: fork\n---\n\n# Code Context (Exa)\n\n## Tool Restriction (Critical)\n\nONLY use `get_code_context_exa`. Do NOT use other Exa tools.\n\n## Token Isolation (Critical)\n\nNever run Exa in main context. Always spawn Task agents:\n- Agent calls `get_code_context_exa`\n- Agent extracts the minimum viable snippet(s) + constraints\n- Agent deduplicates near-identical results (mirrors, forks, repeated StackOverflow answers) before presenting\n- Agent returns copyable snippets + brief explanation\n- Main context stays clean regardless of search volume\n\n## When to Use\n\nUse this tool for ANY programming-related request:\n- API usage and syntax\n- SDK/library examples\n- config and setup patterns\n- framework \"how to\" questions\n- debugging when you need authoritative snippets\n\n## Inputs (Supported)\n\n`get_code_context_exa` supports:\n- `query` (string, required)\n- `tokensNum` (number, optional; default ~5000; typical range 1000–50000)\n\n## Query Writing Patterns (High Signal)\n\nTo reduce irrelevant results and cross-language noise:\n- Always include the **programming language** in the query.\n  - Example: use **\"Go generics\"** instead of just **\"generics\"**.\n- When applicable, also include **framework + version** (e.g., \"Next.js 14\", \"React 19\", \"Python 3.12\").\n- Include exact identifiers (function/class names, config keys, error messages) when you have them.\n\n## Dynamic Tuning\n\nToken strategy:\n- Focused snippet needed → tokensNum 1000–3000\n- Most tasks → tokensNum 5000\n- Complex integration → tokensNum 10000–20000\n- Only go larger when 
necessary (avoid dumping large context)\n\n## Output Format (Recommended)\n\nReturn:\n1) Best minimal working snippet(s) (keep it copy/paste friendly)\n2) Notes on version / constraints / gotchas\n3) Sources (URLs if present in returned context)\n\nBefore presenting:\n- Deduplicate similar results and keep only the best representative snippet per approach.\n\n## MCP Configuration\n\n```json\n{\n  \"servers\": {\n    \"exa\": {\n      \"type\": \"http\",\n      \"url\": \"https://mcp.exa.ai/mcp?tools=get_code_context_exa\"\n    }\n  }\n}\n```\n\n\nStep 3: Ask User to Restart Claude Code\n\nYou should ask the user to restart Claude Code to have the config changes take effect.\n````\n\n</details>\n\n<details>\n<summary><b>People Search</b></summary>\n\nCopy the content below and paste it into Claude Code. It will set up the MCP connection and skill for you.\n\n````\nStep 1: Install or update Exa MCP\n\nIf Exa MCP already exists in your MCP configuration, either uninstall it first and install the new one, or update your existing MCP config with this endpoint. Run this command in your terminal:\n\nclaude mcp add --transport http exa \"https://mcp.exa.ai/mcp?tools=web_search_advanced_exa\"\n\n\nStep 2: Add this Claude skill\n\n---\nname: people-research\ndescription: People research using Exa search. Finds LinkedIn profiles, professional backgrounds, experts, team members, and public bios across the web. Use when searching for people, finding experts, or looking up professional profiles.\ncontext: fork\n---\n\n# People Research\n\n## Tool Restriction (Critical)\n\nONLY use `web_search_advanced_exa`. Do NOT use `web_search_exa` or any other Exa tools.\n\n## Token Isolation (Critical)\n\nNever run Exa searches in main context. 
Always spawn Task agents:\n- Agent runs Exa search internally\n- Agent processes results using LLM intelligence\n- Agent returns only distilled output (compact JSON or brief markdown)\n- Main context stays clean regardless of search volume\n\n## Dynamic Tuning\n\nNo hardcoded numResults. Tune to user intent:\n- User says \"a few\" → 10-20\n- User says \"comprehensive\" → 50-100\n- User specifies number → match it\n- Ambiguous? Ask: \"How many profiles would you like?\"\n\n## Query Variation\n\nExa returns different results for different phrasings. For coverage:\n- Generate 2-3 query variations\n- Run in parallel\n- Merge and deduplicate\n\n## Categories\n\nUse appropriate Exa `category` depending on what you need:\n- `people` → LinkedIn profiles, public bios (primary for discovery)\n- `personal site` → personal blogs, portfolio sites, about pages\n- `news` → press mentions, interviews, speaker bios\n- No category (`type: \"auto\"`) → general web results, broader context\n\nStart with `category: \"people\"` for profile discovery, then use other categories or no category with `livecrawl: \"fallback\"` for deeper research on specific individuals.\n\n### Category-Specific Filter Restrictions\n\nWhen using `category: \"people\"`, these parameters cause errors:\n- `startPublishedDate` / `endPublishedDate`\n- `startCrawlDate` / `endCrawlDate`\n- `includeText` / `excludeText`\n- `excludeDomains`\n- `includeDomains` — **LinkedIn domains only** (e.g., \"linkedin.com\")\n\nWhen searching without a category, all parameters are available (but `includeText`/`excludeText` still only support single-item arrays).\n\n## LinkedIn\n\nPublic LinkedIn via Exa: `category: \"people\"`, no other filters.\nAuth-required LinkedIn → use Claude in Chrome browser fallback.\n\n## Browser Fallback\n\nAuto-fallback to Claude in Chrome when:\n- Exa returns insufficient results\n- Content is auth-gated\n- Dynamic pages need JavaScript\n\n## Examples\n\n### Discovery: find people by 
role\n```\nweb_search_advanced_exa {\n  \"query\": \"VP Engineering AI infrastructure\",\n  \"category\": \"people\",\n  \"numResults\": 20,\n  \"type\": \"auto\"\n}\n```\n\n### With query variations\n```\nweb_search_advanced_exa {\n  \"query\": \"machine learning engineer San Francisco\",\n  \"category\": \"people\",\n  \"additionalQueries\": [\"ML engineer SF\", \"AI engineer Bay Area\"],\n  \"numResults\": 25,\n  \"type\": \"deep\"\n}\n```\n\n### Deep dive: research a specific person\n```\nweb_search_advanced_exa {\n  \"query\": \"Dario Amodei Anthropic CEO background\",\n  \"type\": \"auto\",\n  \"livecrawl\": \"fallback\",\n  \"numResults\": 15\n}\n```\n\n### News mentions\n```\nweb_search_advanced_exa {\n  \"query\": \"Dario Amodei interview\",\n  \"category\": \"news\",\n  \"numResults\": 10,\n  \"startPublishedDate\": \"2024-01-01\"\n}\n```\n\n## Output Format\n\nReturn:\n1) Results (name, title, company, location if available)\n2) Sources (Profile URLs)\n3) Notes (profile completeness, verification status)\n\n\nStep 3: Ask User to Restart Claude Code\n\nYou should ask the user to restart Claude Code to have the config changes take effect.\n````\n\n</details>\n\n<details>\n<summary><b>Financial Report Search</b></summary>\n\nCopy the content below and paste it into Claude Code. It will set up the MCP connection and skill for you.\n\n````\nStep 1: Install or update Exa MCP\n\nIf Exa MCP already exists in your MCP configuration, either uninstall it first and install the new one, or update your existing MCP config with this endpoint. Run this command in your terminal:\n\nclaude mcp add --transport http exa \"https://mcp.exa.ai/mcp?tools=web_search_advanced_exa\"\n\n\nStep 2: Add this Claude skill\n\n---\nname: web-search-advanced-financial-report\ndescription: Search for financial reports using Exa advanced search. Near-full filter support for finding SEC filings, earnings reports, and financial documents. 
Use when searching for 10-K filings, quarterly earnings, or annual reports.\ncontext: fork\n---\n\n# Web Search Advanced - Financial Report Category\n\n## Tool Restriction (Critical)\n\nONLY use `web_search_advanced_exa` with `category: \"financial report\"`. Do NOT use other categories or tools.\n\n## Filter Restrictions (Critical)\n\nThe `financial report` category has one known restriction:\n\n- `excludeText` - NOT SUPPORTED (causes 400 error)\n\n## Supported Parameters\n\n### Core\n- `query` (required)\n- `numResults`\n- `type` (\"auto\", \"fast\", \"deep\", \"neural\")\n\n### Domain filtering\n- `includeDomains` (e.g., [\"sec.gov\", \"investor.apple.com\"])\n- `excludeDomains`\n\n### Date filtering (ISO 8601) - Very useful for financial reports!\n- `startPublishedDate` / `endPublishedDate`\n- `startCrawlDate` / `endCrawlDate`\n\n### Text filtering\n- `includeText` (must contain ALL) - **single-item arrays only**; multi-item causes 400\n- ~~`excludeText`~~ - NOT SUPPORTED\n\n### Content extraction\n- `textMaxCharacters` / `contextMaxCharacters`\n- `enableSummary` / `summaryQuery`\n- `enableHighlights` / `highlightsNumSentences` / `highlightsPerUrl` / `highlightsQuery`\n\n### Additional\n- `additionalQueries`\n- `livecrawl` / `livecrawlTimeout`\n- `subpages` / `subpageTarget`\n\n## Token Isolation (Critical)\n\nNever run Exa searches in main context. 
Always spawn Task agents:\n- Agent calls `web_search_advanced_exa` with `category: \"financial report\"`\n- Agent merges + deduplicates results before presenting\n- Agent returns distilled output (brief markdown or compact JSON)\n- Main context stays clean regardless of search volume\n\n## When to Use\n\nUse this category when you need:\n- SEC filings (10-K, 10-Q, 8-K, S-1)\n- Quarterly earnings reports\n- Annual reports\n- Investor presentations\n- Financial statements\n\n## Examples\n\nSEC filings for a company:\n```\nweb_search_advanced_exa {\n  \"query\": \"Anthropic SEC filing S-1\",\n  \"category\": \"financial report\",\n  \"numResults\": 10,\n  \"type\": \"auto\"\n}\n```\n\nRecent earnings reports:\n```\nweb_search_advanced_exa {\n  \"query\": \"Q4 2025 earnings report technology\",\n  \"category\": \"financial report\",\n  \"startPublishedDate\": \"2025-10-01\",\n  \"numResults\": 20,\n  \"type\": \"auto\"\n}\n```\n\nSpecific filing type:\n```\nweb_search_advanced_exa {\n  \"query\": \"10-K annual report AI companies\",\n  \"category\": \"financial report\",\n  \"includeDomains\": [\"sec.gov\"],\n  \"startPublishedDate\": \"2025-01-01\",\n  \"numResults\": 15,\n  \"type\": \"deep\"\n}\n```\n\nRisk factors analysis:\n```\nweb_search_advanced_exa {\n  \"query\": \"risk factors cybersecurity\",\n  \"category\": \"financial report\",\n  \"includeText\": [\"cybersecurity\"],\n  \"numResults\": 10,\n  \"enableHighlights\": true,\n  \"highlightsQuery\": \"What are the main cybersecurity risks?\"\n}\n```\n\n## Output Format\n\nReturn:\n1) Results (company name, filing type, date, key figures/highlights)\n2) Sources (Filing URLs)\n3) Notes (reporting period, any restatements, auditor notes)\n\n\nStep 3: Ask User to Restart Claude Code\n\nYou should ask the user to restart Claude Code to have the config changes take effect.\n````\n\n</details>\n\n<details>\n<summary><b>Research Paper Search</b></summary>\n\nCopy the content below and paste it into Claude Code. 
It will set up the MCP connection and skill for you.\n\n````\nStep 1: Install or update Exa MCP\n\nIf Exa MCP already exists in your MCP configuration, either uninstall it first and install the new one, or update your existing MCP config with this endpoint. Run this command in your terminal:\n\nclaude mcp add --transport http exa \"https://mcp.exa.ai/mcp?tools=web_search_advanced_exa\"\n\n\nStep 2: Add this Claude skill\n\n---\nname: web-search-advanced-research-paper\ndescription: Search for research papers and academic content using Exa advanced search. Full filter support including date ranges and text filtering. Use when searching for academic papers, arXiv preprints, or scientific research.\ncontext: fork\n---\n\n# Web Search Advanced - Research Paper Category\n\n## Tool Restriction (Critical)\n\nONLY use `web_search_advanced_exa` with `category: \"research paper\"`. Do NOT use other categories or tools.\n\n## Full Filter Support\n\nThe `research paper` category supports ALL available parameters:\n\n### Core\n- `query` (required)\n- `numResults`\n- `type` (\"auto\", \"fast\", \"deep\", \"neural\")\n\n### Domain filtering\n- `includeDomains` (e.g., [\"arxiv.org\", \"openreview.net\"])\n- `excludeDomains`\n\n### Date filtering (ISO 8601)\n- `startPublishedDate` / `endPublishedDate`\n- `startCrawlDate` / `endCrawlDate`\n\n### Text filtering\n- `includeText` (must contain ALL)\n- `excludeText` (exclude if ANY match)\n\n**Array size restriction:** `includeText` and `excludeText` only support **single-item arrays**. Multi-item arrays (2+ items) cause 400 errors. 
To match multiple terms, put them in the `query` string or run separate searches.\n\n### Content extraction\n- `textMaxCharacters` / `contextMaxCharacters`\n- `enableSummary` / `summaryQuery`\n- `enableHighlights` / `highlightsNumSentences` / `highlightsPerUrl` / `highlightsQuery`\n\n### Additional\n- `userLocation`\n- `moderation`\n- `additionalQueries`\n- `livecrawl` / `livecrawlTimeout`\n- `subpages` / `subpageTarget`\n\n## Token Isolation (Critical)\n\nNever run Exa searches in main context. Always spawn Task agents:\n- Agent calls `web_search_advanced_exa` with `category: \"research paper\"`\n- Agent merges + deduplicates results before presenting\n- Agent returns distilled output (brief markdown or compact JSON)\n- Main context stays clean regardless of search volume\n\n## When to Use\n\nUse this category when you need:\n- Academic papers from arXiv, OpenReview, PubMed, etc.\n- Scientific research on specific topics\n- Literature reviews with date filtering\n- Papers containing specific methodologies or terms\n\n## Examples\n\nRecent papers on a topic:\n```\nweb_search_advanced_exa {\n  \"query\": \"transformer attention mechanisms efficiency\",\n  \"category\": \"research paper\",\n  \"startPublishedDate\": \"2024-01-01\",\n  \"numResults\": 15,\n  \"type\": \"auto\"\n}\n```\n\nPapers from specific venues:\n```\nweb_search_advanced_exa {\n  \"query\": \"large language model agents\",\n  \"category\": \"research paper\",\n  \"includeDomains\": [\"arxiv.org\", \"openreview.net\"],\n  \"includeText\": [\"LLM\"],\n  \"numResults\": 20,\n  \"type\": \"deep\"\n}\n```\n\n## Output Format\n\nReturn:\n1) Results (structured list with title, authors, date, abstract summary)\n2) Sources (URLs with publication venue)\n3) Notes (methodology differences, conflicting findings)\n\n\nStep 3: Ask User to Restart Claude Code\n\nYou should ask the user to restart Claude Code to have the config changes take effect.\n````\n\n</details>\n\n<details>\n<summary><b>Personal Site 
Search</b></summary>\n\nCopy the content below and paste it into Claude Code. It will set up the MCP connection and skill for you.\n\n````\nStep 1: Install or update Exa MCP\n\nIf Exa MCP already exists in your MCP configuration, either uninstall it first and install the new one, or update your existing MCP config with this endpoint. Run this command in your terminal:\n\nclaude mcp add --transport http exa \"https://mcp.exa.ai/mcp?tools=web_search_advanced_exa\"\n\n\nStep 2: Add this Claude skill\n\n---\nname: web-search-advanced-personal-site\ndescription: Search personal websites and blogs using Exa advanced search. Full filter support for finding individual perspectives, portfolios, and personal blogs. Use when searching for personal sites, blog posts, or portfolio websites.\ncontext: fork\n---\n\n# Web Search Advanced - Personal Site Category\n\n## Tool Restriction (Critical)\n\nONLY use `web_search_advanced_exa` with `category: \"personal site\"`. Do NOT use other categories or tools.\n\n## Full Filter Support\n\nThe `personal site` category supports ALL available parameters:\n\n### Core\n- `query` (required)\n- `numResults`\n- `type` (\"auto\", \"fast\", \"deep\", \"neural\")\n\n### Domain filtering\n- `includeDomains`\n- `excludeDomains` (e.g., exclude Medium if you want independent blogs)\n\n### Date filtering (ISO 8601)\n- `startPublishedDate` / `endPublishedDate`\n- `startCrawlDate` / `endCrawlDate`\n\n### Text filtering\n- `includeText` (must contain ALL)\n- `excludeText` (exclude if ANY match)\n\n**Array size restriction:** `includeText` and `excludeText` only support **single-item arrays**. Multi-item arrays (2+ items) cause 400 errors. 
To match multiple terms, put them in the `query` string or run separate searches.\n\n### Content extraction\n- `textMaxCharacters` / `contextMaxCharacters`\n- `enableSummary` / `summaryQuery`\n- `enableHighlights` / `highlightsNumSentences` / `highlightsPerUrl` / `highlightsQuery`\n\n### Additional\n- `additionalQueries`\n- `livecrawl` / `livecrawlTimeout`\n- `subpages` / `subpageTarget` - useful for exploring portfolio sites\n\n## Token Isolation (Critical)\n\nNever run Exa searches in main context. Always spawn Task agents:\n- Agent calls `web_search_advanced_exa` with `category: \"personal site\"`\n- Agent merges + deduplicates results before presenting\n- Agent returns distilled output (brief markdown or compact JSON)\n- Main context stays clean regardless of search volume\n\n## When to Use\n\nUse this category when you need:\n- Individual expert opinions and experiences\n- Personal blog posts on technical topics\n- Portfolio websites\n- Independent analysis (not corporate content)\n- Deep dives and tutorials from practitioners\n\n## Examples\n\nTechnical blog posts:\n```\nweb_search_advanced_exa {\n  \"query\": \"building production LLM applications lessons learned\",\n  \"category\": \"personal site\",\n  \"numResults\": 15,\n  \"type\": \"deep\",\n  \"enableSummary\": true\n}\n```\n\nRecent posts on a topic:\n```\nweb_search_advanced_exa {\n  \"query\": \"Rust async runtime comparison\",\n  \"category\": \"personal site\",\n  \"startPublishedDate\": \"2025-01-01\",\n  \"numResults\": 10,\n  \"type\": \"auto\"\n}\n```\n\nExclude aggregators:\n```\nweb_search_advanced_exa {\n  \"query\": \"startup founder lessons\",\n  \"category\": \"personal site\",\n  \"excludeDomains\": [\"medium.com\", \"substack.com\"],\n  \"numResults\": 15,\n  \"type\": \"auto\"\n}\n```\n\n## Output Format\n\nReturn:\n1) Results (title, author/site name, date, key insights)\n2) Sources (URLs)\n3) Notes (author expertise, potential biases, depth of coverage)\n\n\nStep 3: Ask User to 
Restart Claude Code\n\nYou should ask the user to restart Claude Code to have the config changes take effect.\n````\n\n</details>\n\n<details>\n<summary><b>X/Twitter Search</b></summary>\n\nCopy the content below and paste it into Claude Code. It will set up the MCP connection and skill for you.\n\n````\nStep 1: Install or update Exa MCP\n\nIf Exa MCP already exists in your MCP configuration, either uninstall it first and install the new one, or update your existing MCP config with this endpoint. Run this command in your terminal:\n\nclaude mcp add --transport http exa \"https://mcp.exa.ai/mcp?tools=web_search_advanced_exa\"\n\n\nStep 2: Add this Claude skill\n\n---\nname: web-search-advanced-tweet\ndescription: Search tweets and Twitter/X content using Exa advanced search. Limited filter support - text and domain filters are NOT supported. Use when searching for tweets, Twitter/X discussions, or social media sentiment.\ncontext: fork\n---\n\n# Web Search Advanced - Tweet Category\n\n## Tool Restriction (Critical)\n\nONLY use `web_search_advanced_exa` with `category: \"tweet\"`. Do NOT use other categories or tools.\n\n## Filter Restrictions (Critical)\n\nThe `tweet` category has **LIMITED filter support**. 
The following parameters are **NOT supported** and will cause 400 errors:\n\n- `includeText` - NOT SUPPORTED\n- `excludeText` - NOT SUPPORTED\n- `includeDomains` - NOT SUPPORTED\n- `excludeDomains` - NOT SUPPORTED\n- `moderation` - NOT SUPPORTED (causes 500 server error)\n\n## Supported Parameters\n\n### Core\n- `query` (required)\n- `numResults`\n- `type` (\"auto\", \"fast\", \"deep\", \"neural\")\n\n### Date filtering (ISO 8601) - Use these instead of text filters!\n- `startPublishedDate` / `endPublishedDate`\n- `startCrawlDate` / `endCrawlDate`\n\n### Content extraction\n- `textMaxCharacters` / `contextMaxCharacters`\n- `enableHighlights` / `highlightsNumSentences` / `highlightsPerUrl` / `highlightsQuery`\n- `enableSummary` / `summaryQuery`\n\n### Additional\n- `additionalQueries` - useful for hashtag variations\n- `livecrawl` / `livecrawlTimeout` - use \"preferred\" for recent tweets\n\n## Token Isolation (Critical)\n\nNever run Exa searches in main context. Always spawn Task agents:\n- Agent calls `web_search_advanced_exa` with `category: \"tweet\"`\n- Agent merges + deduplicates results before presenting\n- Agent returns distilled output (brief markdown or compact JSON)\n- Main context stays clean regardless of search volume\n\n## When to Use\n\nUse this category when you need:\n- Social discussions on a topic\n- Product announcements from company accounts\n- Developer opinions and experiences\n- Trending topics and community sentiment\n- Expert takes and threads\n\n## Examples\n\nRecent tweets on a topic:\n```\nweb_search_advanced_exa {\n  \"query\": \"Claude Code MCP experience\",\n  \"category\": \"tweet\",\n  \"startPublishedDate\": \"2025-01-01\",\n  \"numResults\": 20,\n  \"type\": \"auto\",\n  \"livecrawl\": \"preferred\"\n}\n```\n\nSearch with specific keywords (put keywords in query, not includeText):\n```\nweb_search_advanced_exa {\n  \"query\": \"launching announcing new open source release\",\n  \"category\": \"tweet\",\n  \"startPublishedDate\": 
\"2025-12-01\",\n  \"numResults\": 15,\n  \"type\": \"auto\"\n}\n```\n\nDeveloper sentiment (use specific query terms instead of excludeText):\n```\nweb_search_advanced_exa {\n  \"query\": \"developer experience DX frustrating painful\",\n  \"category\": \"tweet\",\n  \"numResults\": 20,\n  \"type\": \"deep\",\n  \"livecrawl\": \"preferred\"\n}\n```\n\n## Output Format\n\nReturn:\n1) Results (tweet content, author handle, date, engagement if visible)\n2) Sources (Tweet URLs)\n3) Notes (sentiment summary, notable accounts, threads vs single tweets)\n\nImportant: Be aware that tweet content can be informal, sarcastic, or context-dependent.\n\n\nStep 3: Ask User to Restart Claude Code\n\nYou should ask the user to restart Claude Code to have the config changes take effect.\n````\n\n</details>\n\n## Links\n\n- [Documentation](https://docs.exa.ai/reference/exa-mcp)\n- [npm Package](https://www.npmjs.com/package/exa-mcp-server)\n- [Get Your Exa API Key](https://dashboard.exa.ai/api-keys)\n\n\n<br>\n\nBuilt with ❤️ by Exa\n"
  },
  {
    "path": "VERCEL_DEPLOYMENT_GUIDE.md",
    "content": "# Vercel Deployment Guide\n\nYour Exa MCP Server is ready to deploy to Vercel with **100% compatibility** with the existing Smithery deployment.\n\n## ✅ Quick Start Checklist\n\n### Step 1: Install Dependencies (5 min)\n```bash\ncd /Users/ishangoswami/Desktop/exa/exa-mcp-server/exa-mcp-server\nnpm install\n```\n\n### Step 2: Configure Environment (2 min)\n```bash\ncp env.example .env.local\n# Edit .env.local and add your EXA_API_KEY\n```\n\n### Step 3: Test Locally (5 min)\n```bash\nnpm run dev:vercel\n# Server will run at http://localhost:3000/api/mcp\n```\n\n### Step 4: Test with MCP Inspector (5 min)\n```bash\nnpx @modelcontextprotocol/inspector\n# Connect to: http://localhost:3000/api/mcp\n# Test the tools work\n```\n\n### Step 5: Deploy to Vercel (10 min)\n```bash\n# Install Vercel CLI\nnpm i -g vercel\n\n# Login\nvercel login\n\n# Deploy preview\nvercel\n\n# Add environment variable\nvercel env add EXA_API_KEY\n# Paste your key, select all environments\n\n# Deploy production\nvercel --prod\n```\n\n### Step 6: Update Client Configurations (5 min)\n\n**Cursor (.cursor/mcp.json):**\n```json\n{\n  \"mcpServers\": {\n    \"exa\": {\n      \"url\": \"https://exa-mcp-server-nine.vercel.app/api/mcp\"\n    }\n  }\n}\n```\n\n**Claude Desktop:**\n```json\n{\n  \"mcpServers\": {\n    \"exa\": {\n      \"url\": \"https://your-project.vercel.app/api/mcp\",\n      \"transport\": \"streamable-http\"\n    }\n  }\n}\n```\n\n---\n\n## 🎯 What Changed & Compatibility\n\n### ✅ 100% Compatible Features\n\nYour Vercel deployment maintains **complete compatibility** with `https://mcp.exa.ai/mcp`:\n\n| Feature | Status |\n|---------|--------|\n| **URL Parameters** | |\n| `?tools=web_search_exa,get_code_context_exa` | ✅ Works identically |\n| `?exaApiKey=YOUR_KEY` | ✅ Works identically |\n| `?debug=true` | ✅ Works identically |\n| **Default Tools** | |\n| `web_search_exa` + `get_code_context_exa` enabled by default | ✅ Preserved |\n| All 8 tools available | ✅ Unchanged 
|\n| **Tool Behavior** | |\n| All tool implementations | ✅ Zero changes |\n| Tool names and parameters | ✅ Identical |\n| **MCP Protocol** | |\n| Streamable HTTP transport | ✅ Same |\n| All MCP clients supported | ✅ Same |\n\n### 📦 What Was Added (Non-Breaking)\n\n**New Files:**\n- `api/mcp.ts` - Vercel Function entry point (supports URL parameters)\n- `src/mcp-handler.ts` - Shared logic used by both Smithery and Vercel\n- `vercel.json` - Vercel configuration\n- `.vercelignore` - Deployment exclusions\n- `env.example` - Environment template\n\n**New Dependencies:**\n- `mcp-handler` - Vercel's MCP wrapper\n- `vercel` - Vercel CLI (dev dependency)\n\n**Modified Files:**\n- `package.json` - Added new dependencies and scripts\n- `src/index.ts` - Refactored to use shared logic (Smithery still works!)\n- `tsconfig.json` - Updated for Vercel compatibility\n\n**Zero tool changes** - All files in `src/tools/` are unchanged!\n\n---\n\n## 🔧 URL Parameters Support\n\nYour deployment supports the same URL parameters as the hosted version:\n\n### Examples:\n```bash\n# Enable specific tools\nhttps://your-project.vercel.app/api/mcp?tools=web_search_exa,get_code_context_exa\n\n# Pass API key in URL\nhttps://your-project.vercel.app/api/mcp?exaApiKey=YOUR_KEY\n\n# Enable debug mode\nhttps://your-project.vercel.app/api/mcp?debug=true\n\n# Combine parameters\nhttps://your-project.vercel.app/api/mcp?tools=web_search_exa&exaApiKey=KEY&debug=true\n```\n\n### Available Tools:\n- `web_search_exa` (default: ON)\n- `get_code_context_exa` (default: ON)\n- `crawling_exa` (default: OFF)\n- `company_research_exa` (default: OFF)\n- `people_search_exa` (default: OFF)\n- `linkedin_search_exa` (default: OFF, **deprecated** - use `people_search_exa`)\n- `deep_researcher_start` (default: OFF)\n- `deep_researcher_check` (default: OFF)\n\n### Configuration Priority:\n```\nURL Parameter > Environment Variable > Default Value\n```\n\n---\n\n## 🧪 Testing Your Deployment\n\n### Local Testing\n```bash\n# Test 
default (should enable web_search_exa + get_code_context_exa)\ncurl http://localhost:3000/api/mcp\n\n# Test specific tool\ncurl \"http://localhost:3000/api/mcp?tools=web_search_exa\"\n\n# Test with API key\ncurl \"http://localhost:3000/api/mcp?exaApiKey=YOUR_KEY\"\n```\n\n### Production Testing\n```bash\n# Test default configuration\ncurl https://your-project.vercel.app/api/mcp\n\n# Test with MCP Inspector\nnpx @modelcontextprotocol/inspector\n# URL: https://your-project.vercel.app/api/mcp\n\n# Test with your MCP client (Cursor, Claude Desktop, etc.)\n```\n\n---\n\n## 🚀 Deployment Commands Reference\n\n```bash\n# Development\nnpm run dev:vercel              # Start local Vercel dev server\n\n# Deployment\nvercel                          # Deploy to preview\nvercel --prod                   # Deploy to production\n\n# Environment Variables\nvercel env add VAR_NAME         # Add environment variable\nvercel env rm VAR_NAME          # Remove environment variable\nvercel env ls                   # List all variables\n\n# Monitoring\nvercel logs                     # View function logs\nvercel logs --prod              # View production logs\nvercel ls                       # List deployments\n\n# Rollback\nvercel promote <deployment-url> # Promote a previous deployment\n```\n\n---\n\n## 📊 Environment Variables\n\nSet these in Vercel dashboard or via CLI:\n\n| Variable | Required | Description | Default |\n|----------|----------|-------------|---------|\n| `EXA_API_KEY` | Yes | Your Exa AI API key | - |\n| `DEBUG` | No | Enable debug logging | `false` |\n| `ENABLED_TOOLS` | No | Comma-separated tool list | `web_search_exa,get_code_context_exa` |\n\n**Add via CLI:**\n```bash\nvercel env add EXA_API_KEY\n# Paste your key when prompted\n# Select all environments (Production, Preview, Development)\n```\n\n---\n\n## 🔍 Troubleshooting\n\n### Issue: \"Module not found: mcp-handler\"\n**Solution:** Run `npm install`\n\n### Issue: \"EXA_API_KEY not found\"\n**Solution:**\n1. 
Check Vercel dashboard → Settings → Environment Variables\n2. Add `EXA_API_KEY` if missing\n3. Redeploy: `vercel --prod`\n\n### Issue: Function timeout\n**Problem:** Free tier has 10s timeout\n**Solution:**\n- Upgrade to Vercel Pro for 60s timeout\n- Or use only faster tools (avoid `deep_researcher_*`)\n\n### Issue: Tools not working\n**Debug steps:**\n1. Enable debug: `vercel env add DEBUG` (value: `true`)\n2. Redeploy: `vercel --prod`\n3. Check logs: `vercel logs --prod`\n4. Test with `?debug=true` in URL\n\n### Issue: Cold starts are slow\n**This is normal!** First request after idle takes 1-2s. Subsequent requests are fast.\n\n---\n\n## 📋 Customer Migration Guide\n\n### For Existing Customers\n\n**Only ONE change needed:**\n\n**Before:**\n```\nhttps://mcp.exa.ai/mcp?tools=web_search_exa,get_code_context_exa\n```\n\n**After:**\n```\nhttps://your-project.vercel.app/api/mcp?tools=web_search_exa,get_code_context_exa\n```\n\nEverything else works identically!\n\n### Migration Options\n\n**Option 1: Zero Downtime (Recommended)**\n1. Deploy to Vercel (new URL)\n2. Keep Smithery running (old URL)\n3. Gradually update customer configurations\n4. Deprecate Smithery when ready\n\n**Option 2: Direct Cutover**\n1. Deploy to Vercel\n2. Announce URL change\n3. Customers update config (single line)\n4. 
Immediate switch\n\n---\n\n## 🎯 Success Criteria\n\nYou'll know it works when:\n\n- [x] `npm install` completes without errors\n- [x] Local server runs at `http://localhost:3000`\n- [x] MCP Inspector connects successfully\n- [x] Default tools are `web_search_exa` + `get_code_context_exa`\n- [x] URL parameter `?tools=web_search_exa` works\n- [x] Production deployment succeeds\n- [x] Vercel logs show no errors\n- [x] MCP clients can connect and use tools\n\n---\n\n## 📞 Support & Resources\n\n- **Vercel Dashboard:** https://vercel.com/dashboard\n- **Vercel Docs:** https://vercel.com/docs\n- **MCP Specification:** https://modelcontextprotocol.io\n- **Exa API:** https://docs.exa.ai\n\n---\n\n## ⚡ Quick Commands Summary\n\n```bash\n# Setup\nnpm install\ncp env.example .env.local\n# Edit .env.local with your EXA_API_KEY\n\n# Local Development\nnpm run dev:vercel\n\n# Deploy\nvercel login\nvercel                    # Preview\nvercel env add EXA_API_KEY\nvercel --prod             # Production\n\n# Test\nnpx @modelcontextprotocol/inspector\n# Connect to: http://localhost:3000/api/mcp\n\n# Monitor\nvercel logs --prod\n```\n\n---\n\n**Ready to deploy!** 🚀\n\nThe Vercel deployment is a drop-in replacement for your Smithery deployment with zero breaking changes. Only the URL domain needs to change.\n\n"
  },
  {
    "path": "api/mcp.ts",
    "content": "process.env.AGNOST_LOG_LEVEL = 'error';\n\nimport { createMcpHandler } from 'mcp-handler';\nimport { initializeMcpServer } from '../src/mcp-handler.js';\nimport { Ratelimit } from '@upstash/ratelimit';\nimport { Redis } from '@upstash/redis';\n\n/**\n * IP-based rate limiting configuration for free MCP users.\n * Users who provide their own API key via ?exaApiKey= bypass rate limiting.\n * \n * Rate limiting only applies to actual tool calls (tools/call method), not to\n * basic MCP protocol methods like tools/list, initialize, ping, etc.\n * \n * Environment variables (supports both Vercel KV and Upstash naming):\n * - KV_REST_API_URL or UPSTASH_REDIS_REST_URL: Redis connection URL\n * - KV_REST_API_TOKEN or UPSTASH_REDIS_REST_TOKEN: Redis auth token\n * - RATE_LIMIT_QPS: Queries per second limit (default: 2)\n * - RATE_LIMIT_DAILY: Daily request quota (default: 50)\n */\n\n// Lazy-initialize rate limiters only when Upstash is configured\nlet qpsLimiter: Ratelimit | null = null;\nlet dailyLimiter: Ratelimit | null = null;\nlet rateLimitersInitialized = false;\nlet redisClient: Redis | null = null;\n\nfunction initializeRateLimiters(): boolean {\n  if (rateLimitersInitialized) {\n    return qpsLimiter !== null;\n  }\n  \n  rateLimitersInitialized = true;\n  \n  // Support both Vercel KV naming (KV_REST_API_*) and Upstash naming (UPSTASH_REDIS_REST_*)\n  const redisUrl = process.env.KV_REST_API_URL || process.env.UPSTASH_REDIS_REST_URL;\n  const redisToken = process.env.KV_REST_API_TOKEN || process.env.UPSTASH_REDIS_REST_TOKEN;\n  \n  if (!redisUrl || !redisToken) {\n    console.log('[EXA-MCP] Rate limiting disabled: KV_REST_API_URL/UPSTASH_REDIS_REST_URL or KV_REST_API_TOKEN/UPSTASH_REDIS_REST_TOKEN not configured');\n    return false;\n  }\n  \n  try {\n    redisClient = new Redis({\n      url: redisUrl,\n      token: redisToken,\n    });\n    \n    const qpsLimit = parseInt(process.env.RATE_LIMIT_QPS || '2', 10);\n    const dailyLimit = 
parseInt(process.env.RATE_LIMIT_DAILY || '50', 10);\n    \n    // QPS limiter: sliding window for smooth rate limiting\n    qpsLimiter = new Ratelimit({\n      redis: redisClient,\n      limiter: Ratelimit.slidingWindow(qpsLimit, '1 s'),\n      prefix: 'exa-mcp:qps',\n    });\n    \n    // Daily limiter: fixed window that resets daily\n    dailyLimiter = new Ratelimit({\n      redis: redisClient,\n      limiter: Ratelimit.fixedWindow(dailyLimit, '1 d'),\n      prefix: 'exa-mcp:daily',\n    });\n    \n    console.log(`[EXA-MCP] Rate limiting enabled: ${qpsLimit} QPS, ${dailyLimit}/day`);\n    return true;\n  } catch (error) {\n    console.error('[EXA-MCP] Failed to initialize rate limiters:', error);\n    return false;\n  }\n}\n\nfunction getClientIp(request: Request): string {\n  const cfConnectingIp = request.headers.get('cf-connecting-ip');\n  const xRealIp = request.headers.get('x-real-ip');\n  const xForwardedFor = request.headers.get('x-forwarded-for');\n  const xForwardedForFirst = xForwardedFor?.split(',')[0]?.trim();\n\n  return cfConnectingIp ?? xRealIp ?? xForwardedForFirst ?? 'unknown';\n}\n\nconst RATE_LIMIT_ERROR_MESSAGE = `You've hit Exa's free MCP rate limit. 
To continue using without limits, create your own Exa API key.\n\nFix: Create API key at https://dashboard.exa.ai/api-keys , then either:\n- Set the header: Authorization: Bearer YOUR_EXA_API_KEY\n- Or use the URL: https://mcp.exa.ai/mcp?exaApiKey=YOUR_EXA_API_KEY`;\n\n/**\n * Create a JSON-RPC 2.0 error response for rate limiting.\n * MCP uses JSON-RPC 2.0, so we need to return errors in the proper format.\n * Note: We intentionally hide rate limit dimension info (limit set to 0) to prevent\n * users from inferring which limit they hit (QPS vs daily).\n */\nfunction createRateLimitResponse(retryAfterSeconds: number, reset: number): Response {\n  return new Response(\n    JSON.stringify({\n      jsonrpc: '2.0',\n      error: {\n        code: -32000,\n        message: RATE_LIMIT_ERROR_MESSAGE,\n      },\n      id: null,\n    }),\n    {\n      status: 429,\n      headers: {\n        'Content-Type': 'application/json',\n        'Retry-After': String(retryAfterSeconds),\n        'X-RateLimit-Limit': '0',\n        'X-RateLimit-Remaining': '0',\n        'X-RateLimit-Reset': String(reset),\n      },\n    }\n  );\n}\n\n/**\n * Check if a JSON-RPC request is a tools/call method that should be rate limited.\n * Returns true only for actual tool invocations, not for protocol methods like\n * tools/list, initialize, ping, resources/list, prompts/list, etc.\n */\nfunction isRateLimitedMethod(body: string): boolean {\n  try {\n    const parsed = JSON.parse(body);\n    return parsed.method === 'tools/call';\n  } catch {\n    return false;\n  }\n}\n\n/**\n * Save IP and user agent for bypass requests to Redis for tracking.\n * Uses a sorted set with timestamp as score for easy time-based queries.\n */\nasync function saveBypassRequestInfo(ip: string, userAgent: string, debug: boolean): Promise<void> {\n  initializeRateLimiters();\n  \n  if (!redisClient) {\n    if (debug) {\n      console.log('[EXA-MCP] Cannot save bypass info: Redis not configured');\n    }\n    return;\n  }\n  
\n  try {\n    const timestamp = Date.now();\n    const entry = JSON.stringify({ ip, userAgent, timestamp });\n    \n    await redisClient.zadd('exa-mcp:bypass-requests', { score: timestamp, member: entry });\n    \n    if (debug) {\n      console.log(`[EXA-MCP] Saved bypass request info for IP: ${ip}`);\n    }\n  } catch (error) {\n    console.error('[EXA-MCP] Failed to save bypass request info:', error);\n  }\n}\n\n/**\n * Check rate limits for a given IP.\n * Returns null if within limits, or a Response if rate limited.\n */\nasync function checkRateLimits(ip: string, debug: boolean): Promise<Response | null> {\n  if (!qpsLimiter || !dailyLimiter) {\n    return null; // Rate limiting not configured\n  }\n  \n  try {\n    // Check QPS limit first (more likely to be hit)\n    const qpsResult = await qpsLimiter.limit(ip);\n    if (!qpsResult.success) {\n      if (debug) {\n        console.log(`[EXA-MCP] QPS rate limit exceeded for IP: ${ip}`);\n      }\n      const retryAfter = Math.ceil((qpsResult.reset - Date.now()) / 1000);\n      return createRateLimitResponse(retryAfter, qpsResult.reset);\n    }\n    \n    // Check daily limit\n    const dailyResult = await dailyLimiter.limit(ip);\n    if (!dailyResult.success) {\n      if (debug) {\n        console.log(`[EXA-MCP] Daily rate limit exceeded for IP: ${ip}`);\n      }\n      const retryAfter = Math.ceil((dailyResult.reset - Date.now()) / 1000);\n      return createRateLimitResponse(retryAfter, dailyResult.reset);\n    }\n    \n    return null; // Within limits\n  } catch (error) {\n    // If rate limiting fails, allow the request through (fail open)\n    console.error('[EXA-MCP] Rate limit check failed:', error);\n    return null;\n  }\n}\n\n/**\n * Vercel Function entry point for MCP server\n * \n * This handler is automatically deployed as a Vercel Function and provides\n * Streamable HTTP transport for the MCP protocol.\n * \n * Supports API key via header (recommended) or URL query parameter:\n * - 
Authorization: Bearer YOUR_KEY - Pass API key via header (recommended)\n * - ?exaApiKey=YOUR_KEY - Pass API key via URL (backwards compatible)\n * \n * Other URL query parameters:\n * - ?tools=web_search_exa,get_code_context_exa - Enable specific tools\n * - ?debug=true - Enable debug logging\n * \n * Also supports environment variables:\n * - EXA_API_KEY: Your Exa AI API key\n * - DEBUG: Enable debug logging (true/false)\n * - ENABLED_TOOLS: Comma-separated list of tools to enable\n * \n * Priority: header > URL query parameter > environment variable.\n * \n * ARCHITECTURE NOTE:\n * The mcp-handler library creates a single server instance and doesn't pass\n * the request to the initializeServer callback. To support per-request\n * configuration via URL params (like ?tools=... and ?exaApiKey=...), we\n * create a fresh handler for each request. This ensures:\n * 1. Feature parity with the production Smithery-based deployment at mcp.exa.ai\n * 2. Each request gets its own configuration (no API key leakage between users)\n * 3. Users can specify different tools and API keys per request\n */\n\n/** Extract API key from Authorization: Bearer header. */\nfunction getApiKeyFromHeader(request: Request): string | undefined {\n  const authHeader = request.headers.get('authorization');\n  if (authHeader) {\n    const match = authHeader.match(/^Bearer\\s+(.+)$/i);\n    if (match && match[1]) {\n      return match[1];\n    }\n  }\n  return undefined;\n}\n\n/**\n * Extract configuration from request headers, URL, or environment variables.\n * Priority: header > query parameter > environment variable.\n */\nfunction getConfigFromRequest(request: Request) {\n  let exaApiKey = process.env.EXA_API_KEY;\n  let enabledTools: string[] | undefined;\n  let debug = process.env.DEBUG === 'true';\n  let userProvidedApiKey = false;\n\n  // 1. 
Check Authorization: Bearer header (highest priority)\n  const headerApiKey = getApiKeyFromHeader(request);\n  if (headerApiKey) {\n    exaApiKey = headerApiKey;\n    userProvidedApiKey = true;\n  }\n\n  try {\n    const parsedUrl = new URL(request.url);\n    const params = parsedUrl.searchParams;\n\n    // 2. Check ?exaApiKey=YOUR_KEY (fallback for backwards compat, only if no header)\n    if (!headerApiKey && params.has('exaApiKey')) {\n      const keyFromUrl = params.get('exaApiKey');\n      if (keyFromUrl) {\n        exaApiKey = keyFromUrl;\n        userProvidedApiKey = true;\n      }\n    }\n\n    // Support ?tools=tool1,tool2\n    if (params.has('tools')) {\n      const toolsParam = params.get('tools');\n      if (toolsParam) {\n        enabledTools = toolsParam\n          .split(',')\n          .map(t => t.trim())\n          .filter(t => t.length > 0);\n      }\n    }\n\n    // Support ?debug=true\n    if (params.has('debug')) {\n      debug = params.get('debug') === 'true';\n    }\n  } catch (error) {\n    // URL parsing failed, will use env vars\n    if (debug) {\n      console.error('Failed to parse request URL:', error);\n    }\n  }\n\n  // Fall back to env vars if no query params were found\n  if (!enabledTools && process.env.ENABLED_TOOLS) {\n    enabledTools = process.env.ENABLED_TOOLS\n      .split(',')\n      .map(t => t.trim())\n      .filter(t => t.length > 0);\n  }\n\n  return { exaApiKey, enabledTools, debug, userProvidedApiKey };\n}\n\n/**\n * Create a fresh handler for the given configuration\n * We create a new handler per request to ensure each request gets its own\n * configuration (tools and API key). 
This prevents API key leakage between\n * different users who might pass different keys via URL.\n */\nfunction createHandler(config: { exaApiKey?: string; enabledTools?: string[]; debug: boolean; userProvidedApiKey: boolean }) {\n  return createMcpHandler(\n    (server: any) => {\n      initializeMcpServer(server, config);\n    },\n    {}, // Server options\n    { basePath: '/api' } // Config - basePath for Vercel Functions\n  );\n}\n\n/**\n * Main request handler that extracts config from URL and creates\n * a fresh handler for each request\n */\nasync function handleRequest(request: Request): Promise<Response> {\n  // Extract configuration from request headers, URL, and env vars\n  const config = getConfigFromRequest(request);\n  \n  if (config.debug) {\n    console.log(`[EXA-MCP] Request URL: ${request.url}`);\n    console.log(`[EXA-MCP] Enabled tools: ${config.enabledTools?.join(', ') || 'default'}`);\n    console.log(`[EXA-MCP] API key provided: ${config.userProvidedApiKey ? 'yes (user provided via header or query param)' : 'no (using env var)'}`);\n  }\n  \n  const userAgent = request.headers.get('user-agent') || '';\n  const bypassPrefix = process.env.RATE_LIMIT_BYPASS;\n  const bypassApiKey = process.env.EXA_API_KEY_BYPASS;\n  // Only allow bypass if BOTH prefix matches AND bypass API key is configured\n  // This ensures bypass users always use a dedicated key for tracking/billing\n  const bypassRateLimit = bypassPrefix && bypassApiKey && userAgent.startsWith(bypassPrefix);\n  \n  // Use separate API key for bypass users and save their IP/user-agent for tracking\n  if (bypassRateLimit) {\n    config.exaApiKey = bypassApiKey;\n    const clientIp = getClientIp(request);\n    saveBypassRequestInfo(clientIp, userAgent, config.debug);\n  }\n  \n  // Rate limit users who didn't provide their own API key (including bypass users)\n  // Only rate limit actual tool calls (tools/call), not protocol methods like tools/list\n  if (!config.userProvidedApiKey && 
request.method === 'POST') {\n    // Clone the request to read the body without consuming it\n    const clonedRequest = request.clone();\n    const body = await clonedRequest.text();\n    \n    // Only rate limit actual tool calls, not protocol methods\n    if (isRateLimitedMethod(body)) {\n      // Initialize rate limiters on first request (lazy init)\n      initializeRateLimiters();\n      \n      const clientIp = getClientIp(request);\n      \n      if (config.debug) {\n        console.log(`[EXA-MCP] Client IP: ${clientIp}, method: tools/call`);\n      }\n      \n      const rateLimitResponse = await checkRateLimits(clientIp, config.debug);\n      if (rateLimitResponse) {\n        return rateLimitResponse;\n      }\n    } else if (config.debug) {\n      console.log(`[EXA-MCP] Skipping rate limit for non-tool-call method`);\n    }\n  }\n  \n  // Create a fresh handler for this request's configuration\n  const handler = createHandler(config);\n  \n  // Normalize URL pathname to /api/mcp for mcp-handler (it checks url.pathname)\n  // This handles requests from /mcp and / rewrites\n  const url = new URL(request.url);\n  if (url.pathname === '/mcp' || url.pathname === '/') {\n    url.pathname = '/api/mcp';\n    request = new Request(url.toString(), request);\n  }\n  \n  // Delegate to the handler\n  return handler(request);\n}\n\n// Export handlers for Vercel Functions\nexport { handleRequest as GET, handleRequest as POST, handleRequest as DELETE };\n\n"
  },
  {
    "path": "api/well-known-mcp-config.ts",
    "content": "/**\n * Well-known endpoint for MCP configuration schema\n * \n * Exposes a JSON Schema at /.well-known/mcp-config for Smithery and other MCP clients\n * to discover available configuration options. This enables configuration forms in\n * Smithery's UI and allows clients to pass configuration via URL parameters.\n */\n\nconst AVAILABLE_TOOLS = [\n  'web_search_exa',\n  'web_search_advanced_exa',\n  'get_code_context_exa',\n  'crawling_exa',\n];\n\nconst configSchema = {\n  \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n  \"$id\": \"/.well-known/mcp-config\",\n  \"title\": \"Exa MCP Server Configuration\",\n  \"description\": \"Configuration for connecting to the Exa MCP server\",\n  \"x-query-style\": \"dot+bracket\",\n  \"type\": \"object\",\n  \"properties\": {\n    \"exaApiKey\": {\n      \"type\": \"string\",\n      \"title\": \"Exa API Key\",\n      \"description\": \"Your Exa AI API key for search operations (optional - server has a fallback key). Get one at https://exa.ai\"\n    },\n    \"tools\": {\n      \"type\": \"string\",\n      \"title\": \"Enabled Tools\",\n      \"description\": \"Comma-separated list of tools to enable. 
Leave empty for defaults (web_search_exa, get_code_context_exa).\",\n      \"examples\": [\n        \"web_search_exa,web_search_advanced_exa\",\n        \"web_search_exa,web_search_advanced_exa,get_code_context_exa\"\n      ],\n      \"x-available-values\": AVAILABLE_TOOLS\n    },\n    \"debug\": {\n      \"type\": \"boolean\",\n      \"title\": \"Debug Mode\",\n      \"description\": \"Enable debug logging for troubleshooting\",\n      \"default\": false\n    }\n  },\n  \"additionalProperties\": false\n};\n\nexport function GET(): Response {\n  return new Response(JSON.stringify(configSchema, null, 2), {\n    status: 200,\n    headers: {\n      'Content-Type': 'application/json',\n      'Access-Control-Allow-Origin': '*',\n      'Access-Control-Allow-Methods': 'GET, OPTIONS',\n      'Access-Control-Allow-Headers': 'Content-Type',\n      'Cache-Control': 'public, max-age=3600'\n    }\n  });\n}\n\nexport function OPTIONS(): Response {\n  return new Response(null, {\n    status: 204,\n    headers: {\n      'Access-Control-Allow-Origin': '*',\n      'Access-Control-Allow-Methods': 'GET, OPTIONS',\n      'Access-Control-Allow-Headers': 'Content-Type'\n    }\n  });\n}\n"
  },
  {
    "path": "env.example",
    "content": "# Exa AI API Key (Required)\n# Get your API key from: https://exa.ai/\nEXA_API_KEY=your_exa_api_key_here\n\n# Debug logging (Optional)\n# Set to 'true' to enable detailed logging\nDEBUG=false\n\n# Enabled Tools (Optional)\n# Comma-separated list of tools to enable\n# Available tools:\n#   - web_search_exa (enabled by default)\n#   - get_code_context_exa (enabled by default)\n#   - company_research_exa (enabled by default)\n#   - web_search_advanced_exa\n#   - crawling_exa\n#   - deep_researcher_start\n#   - deep_researcher_check\n#   - people_search_exa\n#   - linkedin_search_exa (deprecated: use people_search_exa)\n# \n# Example: ENABLED_TOOLS=web_search_exa,get_code_context_exa,crawling_exa\n# Leave empty or comment out to use defaults\n# ENABLED_TOOLS=\n\n"
  },
  {
    "path": "gemini-extension.json",
    "content": "{\n  \"name\": \"exa-mcp-server\",\n  \"version\": \"3.1.9\",\n  \"description\": \"Official Exa MCP for web search, web crawling and getting technical code docs.\",\n  \"mcpServers\": {\n    \"exa\": {\n      \"httpUrl\": \"https://mcp.exa.ai/mcp\"\n    }\n  }\n}\n"
  },
  {
    "path": "llm_mcp_docs.txt",
    "content": "# Example Clients\nSource: https://modelcontextprotocol.io/clients\n\nA list of applications that support MCP integrations\n\nThis page provides an overview of applications that support the Model Context Protocol (MCP). Each client may support different MCP features, allowing for varying levels of integration with MCP servers.\n\n## Feature support matrix\n\n<div id=\"feature-support-matrix-wrapper\">\n  {/* prettier-ignore-start */}\n\n  | Client                                           | [Resources] | [Prompts] | [Tools] | [Discovery] | [Sampling] | [Roots] | Notes                                                                                           |\n  | ------------------------------------------------ | ----------- | --------- | ------- | ----------- | ---------- | ------- | ----------------------------------------------------------------------------------------------- |\n  | [5ire][5ire]                                     | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools.                                                                                 |\n  | [AgentAI][AgentAI]                               | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Agent Library written in Rust with tools support                                                |\n  | [AgenticFlow][AgenticFlow]                       | ✅           | ✅         | ✅       | ✅           | ❌          | ❌       | Supports tools, prompts, and resources for no-code AI agents and multi-agent workflows.         |\n  | [Amazon Q CLI][Amazon Q CLI]                     | ❌           | ✅         | ✅       | ❓           | ❌          | ❌       | Supports prompts and tools.                                                                     |\n  | [Apify MCP Tester][Apify MCP Tester]             | ❌           | ❌         | ✅       | ✅           | ❌          | ❌       | Supports remote MCP servers and tool discovery.      
                                           |\n  | [Augment Code][AugmentCode]                      | ❌           | ❌         | ✅       | ❌           | ❌          | ❌       | Supports tools in local and remote agents.                                                      |\n  | [BeeAI Framework][BeeAI Framework]               | ❌           | ❌         | ✅       | ❌           | ❌          | ❌       | Supports tools in agentic workflows.                                                            |\n  | [BoltAI][BoltAI]                                 | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools.                                                                                 |\n  | [Claude.ai][Claude.ai]                           | ✅           | ✅         | ✅       | ❌           | ❌          | ❌       | Supports tools, prompts, and resources for remote MCP servers.                                  |\n  | [Claude Code][Claude Code]                       | ❌           | ✅         | ✅       | ❌           | ❌          | ❌       | Supports prompts and tools                                                                      |\n  | [Claude Desktop App][Claude Desktop]             | ✅           | ✅         | ✅       | ❌           | ❌          | ❌       | Supports tools, prompts, and resources for local and remote MCP servers.                        |\n  | [Cline][Cline]                                   | ✅           | ❌         | ✅       | ✅           | ❌          | ❌       | Supports tools and resources.                                                                   |\n  | [Continue][Continue]                             | ✅           | ✅         | ✅       | ❓           | ❌          | ❌       | Supports tools, prompts, and resources.                                                         
|\n  | [Copilot-MCP][CopilotMCP]                        | ✅           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools and resources.                                                                   |\n  | [Cursor][Cursor]                                 | ❌           | ❌         | ✅       | ❌           | ❌          | ❌       | Supports tools.                                                                                 |\n  | [Daydreams Agents][Daydreams]                    | ✅           | ✅         | ✅       | ❌           | ❌          | ❌       | Support for drop in Servers to Daydreams agents                                                 |\n  | [Emacs Mcp][Mcp.el]                              | ❌           | ❌         | ✅       | ❌           | ❌          | ❌       | Supports tools in Emacs.                                                                        |\n  | [fast-agent][fast-agent]                         | ✅           | ✅         | ✅       | ✅           | ✅          | ✅       | Full multimodal MCP support, with end-to-end tests                                              |\n  | [FLUJO][FLUJO]                                   | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Support for resources, Prompts and Roots are coming soon                                        |\n  | [Genkit][Genkit]                                 | ⚠️          | ✅         | ✅       | ❓           | ❌          | ❌       | Supports resource list and lookup through tools.                                                |\n  | [Glama][Glama]                                   | ✅           | ✅         | ✅       | ❓           | ❌          | ❌       | Supports tools.                                                                                 |\n  | [GenAIScript][GenAIScript]                       | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools.                                      
                                           |\n  | [Goose][Goose]                                   | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools.                                                                                 |\n  | [gptme][gptme]                                   | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools.                                                                                 |\n  | [HyperAgent][HyperAgent]                         | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools.                                                                                 |\n  | [JetBrains AI Assistant][JetBrains AI Assistant] | ❌           | ❌         | ✅       | ❌           | ❌          | ❌       | Supports tools for all JetBrains IDEs.                                                          |\n  | [Klavis AI Slack/Discord/Web][Klavis AI]         | ✅           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools and resources.                                                                   |\n  | [LibreChat][LibreChat]                           | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools for Agents                                                                       |\n  | [Lutra][Lutra]                                   | ✅           | ✅         | ✅       | ❓           | ❌          | ❌       | Supports any MCP server for reusable playbook creation.                                         |\n  | [mcp-agent][mcp-agent]                           | ✅           | ✅         | ✅       | ❓           | ⚠️         | ✅       | Supports tools, prompts, resources, roots, server connection management, and agent workflows.   
|\n  | [mcp-use][mcp-use]                               | ✅           | ✅         | ✅       | ❓           | ❌          | ❌       | Support tools, resources, stdio & http connection, local llms-agents.                           |\n  | [MCPHub][MCPHub]                                 | ✅           | ✅         | ✅       | ❓           | ❌          | ❌       | Supports tools, resources, and prompts in Neovim                                                |\n  | [MCPOmni-Connect][MCPOmni-Connect]               | ✅           | ✅         | ✅       | ❓           | ✅          | ❌       | Supports tools with agentic mode, ReAct, and orchestrator capabilities.                         |\n  | [Memex][Memex]                                   | ✅           | ✅         | ✅       | ❓           | ❌          | ❌       | Support tools. Also support *building and testing* MCP server, all-in-one desktop app.          |\n  | [Microsoft Copilot Studio]                       | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools                                                                                  |\n  | [MindPal][MindPal]                               | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools for no-code AI agents and multi-agent workflows.                                 |\n  | [MooPoint][MooPoint]                             | ❌           | ❌         | ✅       | ❓           | ✅          | ❌       | Web-Hosted client with tool calling support                                                     |\n  | [Msty Studio][Msty Studio]                       | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools                                                                                  |\n  | [NVIDIA Agent Intelligence toolkit][AIQ toolkit] | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools in agentic workflows.                 
                                           |\n  | [OpenSumi][OpenSumi]                             | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools in OpenSumi                                                                      |\n  | [oterm][oterm]                                   | ❌           | ✅         | ✅       | ❓           | ✅          | ❌       | Supports tools, prompts and sampling for Ollama.                                                |\n  | [Postman][postman]                               | ✅           | ✅         | ✅       | ❓           | ❌          | ❌       | Supports tools, resources, prompts, and sampling                                                |\n  | [Roo Code][Roo Code]                             | ✅           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools and resources.                                                                   |\n  | [Slack MCP Client][Slack MCP Client]             | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools and multiple servers.                                                            
|\n  | [Sourcegraph Cody][Cody]                         | ✅           | ❌         | ❌       | ❓           | ❌          | ❌       | Supports resources through OpenCTX                                                              |\n  | [SpinAI][SpinAI]                                 | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools for Typescript AI Agents                                                         |\n  | [Superinterface][Superinterface]                 | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools                                                                                  |\n  | [Superjoin][Superjoin]                           | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools and multiple servers.                                                            |\n  | [TheiaAI/TheiaIDE][TheiaAI/TheiaIDE]             | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools for Agents in Theia AI and the AI-powered Theia IDE                              |\n  | [Tome][Tome]                                     | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools, manages MCP servers.                                                            
|\n  | [TypingMind App][TypingMind App]                 | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools at app-level (appear as plugins) or when assigned to Agents                      |\n  | [VS Code GitHub Copilot][VS Code]                | ✅           | ✅         | ✅       | ✅           | ✅          | ✅       | Supports dynamic tool/roots discovery, secure secret configuration, and explicit tool prompting |\n  | [Warp][Warp]                                     | ✅           | ❌         | ✅       | ✅           | ❌          | ❌       | Supports tools, resources, and most of the discovery criteria                                   |\n  | [WhatsMCP][WhatsMCP]                             | ❌           | ❌         | ✅       | ❌           | ❌          | ❌       | Supports tools for Remote MCP Servers in WhatsApp                                               |\n  | [Windsurf Editor][Windsurf]                      | ❌           | ❌         | ✅       | ✅           | ❌          | ❌       | Supports tools with AI Flow for collaborative development.                                      |\n  | [Witsy][Witsy]                                   | ❌           | ❌         | ✅       | ❓           | ❌          | ❌       | Supports tools in Witsy.                                                                        
|\n  | [Zed][Zed]                                       | ❌           | ✅         | ❌       | ❌           | ❌          | ❌       | Prompts appear as slash commands                                                                |\n  | [Zencoder][Zencoder]                             | ❌           | ❌         | ✅       | ❌           | ❌          | ❌       | Supports tools                                                                                  |\n\n  {/* prettier-ignore-end */}\n\n  [Resources]: /docs/concepts/resources\n\n  [Prompts]: /docs/concepts/prompts\n\n  [Tools]: /docs/concepts/tools\n\n  [Discovery]: /docs/concepts/tools#tool-discovery-and-updates\n\n  [Sampling]: /docs/concepts/sampling\n\n  [Roots]: /docs/concepts/roots\n\n  [5ire]: https://github.com/nanbingxyz/5ire\n\n  [AgentAI]: https://github.com/AdamStrojek/rust-agentai\n\n  [AgenticFlow]: https://agenticflow.ai/mcp\n\n  [AIQ toolkit]: https://github.com/NVIDIA/AIQToolkit\n\n  [Amazon Q CLI]: https://github.com/aws/amazon-q-developer-cli\n\n  [Apify MCP Tester]: https://apify.com/jiri.spilka/tester-mcp-client\n\n  [AugmentCode]: https://augmentcode.com\n\n  [BeeAI Framework]: https://i-am-bee.github.io/beeai-framework\n\n  [BoltAI]: https://boltai.com\n\n  [Claude.ai]: https://claude.ai\n\n  [Claude Code]: https://claude.ai/code\n\n  [Claude Desktop]: https://claude.ai/download\n\n  [Cline]: https://github.com/cline/cline\n\n  [Continue]: https://github.com/continuedev/continue\n\n  [CopilotMCP]: https://github.com/VikashLoomba/copilot-mcp\n\n  [Cursor]: https://cursor.com\n\n  [Daydreams]: https://github.com/daydreamsai/daydreams\n\n  [Klavis AI]: https://www.klavis.ai/\n\n  [Mcp.el]: https://github.com/lizqwerscott/mcp.el\n\n  [fast-agent]: https://github.com/evalstate/fast-agent\n\n  [FLUJO]: https://github.com/mario-andreschak/flujo\n\n  [Glama]: https://glama.ai/chat\n\n  [Genkit]: https://github.com/firebase/genkit\n\n  [GenAIScript]: 
https://microsoft.github.io/genaiscript/reference/scripts/mcp-tools/\n\n  [Goose]: https://block.github.io/goose/docs/goose-architecture/#interoperability-with-extensions\n\n  [JetBrains AI Assistant]: https://plugins.jetbrains.com/plugin/22282-jetbrains-ai-assistant\n\n  [LibreChat]: https://github.com/danny-avila/LibreChat\n\n  [Lutra]: https://lutra.ai\n\n  [mcp-agent]: https://github.com/lastmile-ai/mcp-agent\n\n  [mcp-use]: https://github.com/pietrozullo/mcp-use\n\n  [MCPHub]: https://github.com/ravitemer/mcphub.nvim\n\n  [MCPOmni-Connect]: https://github.com/Abiorh001/mcp_omni_connect\n\n  [Memex]: https://memex.tech/\n\n  [Microsoft Copilot Studio]: https://learn.microsoft.com/en-us/microsoft-copilot-studio/agent-extend-action-mcp\n\n  [MindPal]: https://mindpal.io\n\n  [MooPoint]: https://moopoint.io\n\n  [Msty Studio]: https://msty.ai\n\n  [OpenSumi]: https://github.com/opensumi/core\n\n  [oterm]: https://github.com/ggozad/oterm\n\n  [Postman]: https://postman.com/downloads\n\n  [Roo Code]: https://roocode.com\n\n  [Slack MCP Client]: https://github.com/tuannvm/slack-mcp-client\n\n  [Cody]: https://sourcegraph.com/cody\n\n  [SpinAI]: https://spinai.dev\n\n  [Superinterface]: https://superinterface.ai\n\n  [Superjoin]: https://superjoin.ai\n\n  [TheiaAI/TheiaIDE]: https://eclipsesource.com/blogs/2024/12/19/theia-ide-and-theia-ai-support-mcp/\n\n  [Tome]: https://github.com/runebookai/tome\n\n  [TypingMind App]: https://www.typingmind.com\n\n  [VS Code]: https://code.visualstudio.com/\n\n  [Windsurf]: https://codeium.com/windsurf\n\n  [gptme]: https://github.com/gptme/gptme\n\n  [Warp]: https://www.warp.dev/\n\n  [WhatsMCP]: https://wassist.app/mcp/\n\n  [Witsy]: https://github.com/nbonamy/witsy\n\n  [Zed]: https://zed.dev\n\n  [Zencoder]: https://zencoder.ai\n\n  [HyperAgent]: https://github.com/hyperbrowserai/HyperAgent\n</div>\n\n## Client details\n\n### 5ire\n\n[5ire](https://github.com/nanbingxyz/5ire) is an open source cross-platform desktop AI 
assistant that supports tools through MCP servers.\n\n**Key features:**\n\n* Built-in MCP servers can be quickly enabled and disabled.\n* Users can add more servers by modifying the configuration file.\n* It is open-source and user-friendly, suitable for beginners.\n* Future support for MCP will be continuously improved.\n\n### AgentAI\n\n[AgentAI](https://github.com/AdamStrojek/rust-agentai) is a Rust library designed to simplify the creation of AI agents. The library includes seamless integration with MCP Servers.\n\n[Example of MCP Server integration](https://github.com/AdamStrojek/rust-agentai/blob/master/examples/tools_mcp.rs)\n\n**Key features:**\n\n* Multi-LLM – We support most LLM APIs (OpenAI, Anthropic, Gemini, Ollama, and all OpenAI API Compatible).\n* Built-in support for MCP Servers.\n* Create agentic flows in a type- and memory-safe language like Rust.\n\n### AgenticFlow\n\n[AgenticFlow](https://agenticflow.ai/) is a no-code AI platform that helps you build agents that handle sales, marketing, and creative tasks around the clock. 
Connect 2,500+ APIs and 10,000+ tools securely via MCP.\n\n**Key features:**\n\n* No-code AI agent creation and workflow building.\n* Access a vast library of 10,000+ tools and 2,500+ APIs through MCP.\n* Simple 3-step process to connect MCP servers.\n* Securely manage connections and revoke access anytime.\n\n**Learn more:**\n\n* [AgenticFlow MCP Integration](https://agenticflow.ai/mcp)\n\n### Amazon Q CLI\n\n[Amazon Q CLI](https://github.com/aws/amazon-q-developer-cli) is an open-source, agentic coding assistant for terminals.\n\n**Key features:**\n\n* Full support for MCP servers.\n* Edit prompts using your preferred text editor.\n* Access saved prompts instantly with `@`.\n* Control and organize AWS resources directly from your terminal.\n* Tools, profiles, context management, auto-compact, and so much more!\n\n**Get Started**\n\n```bash\nbrew install amazon-q\n```\n\n### Apify MCP Tester\n\n[Apify MCP Tester](https://github.com/apify/tester-mcp-client) is an open-source client that connects to any MCP server using Server-Sent Events (SSE).\nIt is a standalone Apify Actor designed for testing MCP servers over SSE, with support for Authorization headers.\nIt uses plain JavaScript (old-school style) and is hosted on Apify, allowing you to run it without any setup.\n\n**Key features:**\n\n* Connects to any MCP server via SSE.\n* Works with the [Apify MCP Server](https://apify.com/apify/actors-mcp-server) to interact with one or more Apify [Actors](https://apify.com/store).\n* Dynamically utilizes tools based on context and user queries (if supported by the server).\n\n### Augment Code\n\n[Augment Code](https://augmentcode.com) is an AI-powered coding platform for VS Code and JetBrains with autonomous agents, chat, and completions. 
Both local and remote agents are backed by full codebase awareness and native support for MCP, enabling enhanced context through external sources and tools.\n\n**Key features:**\n\n* Full MCP support in local and remote agents.\n* Add additional context through MCP servers.\n* Automate your development workflows with MCP tools.\n* Works in VS Code and JetBrains IDEs.\n\n### BeeAI Framework\n\n[BeeAI Framework](https://i-am-bee.github.io/beeai-framework) is an open-source framework for building, deploying, and serving powerful agentic workflows at scale. The framework includes the **MCP Tool**, a native feature that simplifies the integration of MCP servers into agentic workflows.\n\n**Key features:**\n\n* Seamlessly incorporate MCP tools into agentic workflows.\n* Quickly instantiate framework-native tools from connected MCP client(s).\n* Planned future support for agentic MCP capabilities.\n\n**Learn more:**\n\n* [Example of using MCP tools in agentic workflow](https://i-am-bee.github.io/beeai-framework/#/typescript/tools?id=using-the-mcptool-class)\n\n### BoltAI\n\n[BoltAI](https://boltai.com) is a native, all-in-one AI chat client with MCP support. BoltAI supports multiple AI providers (OpenAI, Anthropic, Google AI...), including local AI models (via Ollama, LM Studio or LMX)\n\n**Key features:**\n\n* MCP Tool integrations: once configured, user can enable individual MCP server in each chat\n* MCP quick setup: import configuration from Claude Desktop app or Cursor editor\n* Invoke MCP tools inside any app with AI Command feature\n* Integrate with remote MCP servers in the mobile app\n\n**Learn more:**\n\n* [BoltAI docs](https://boltai.com/docs/plugins/mcp-servers)\n* [BoltAI website](https://boltai.com)\n\n### Claude Code\n\nClaude Code is an interactive agentic coding tool from Anthropic that helps you code faster through natural language commands. 
It supports MCP integration for prompts and tools, and also functions as an MCP server to integrate with other clients.\n\n**Key features:**\n\n* Tool and prompt support for MCP servers\n* Offers its own tools through an MCP server for integrating with other MCP clients\n\n### Claude.ai\n\n[Claude.ai](https://claude.ai) is Anthropic's web-based AI assistant that provides MCP support for remote servers.\n\n**Key features:**\n\n* Support for remote MCP servers via integrations UI in settings\n* Access to tools, prompts, and resources from configured MCP servers\n* Seamless integration with Claude's conversational interface\n* Enterprise-grade security and compliance features\n\n### Claude Desktop App\n\nThe Claude desktop application provides comprehensive support for MCP, enabling deep integration with local tools and data sources.\n\n**Key features:**\n\n* Full support for resources, allowing attachment of local files and data\n* Support for prompt templates\n* Tool integration for executing commands and scripts\n* Local server connections for enhanced privacy and security\n\n### Cline\n\n[Cline](https://github.com/cline/cline) is an autonomous coding agent in VS Code that edits files, runs commands, uses a browser, and more–with your permission at each step.\n\n**Key features:**\n\n* Create and add tools through natural language (e.g. 
\"add a tool that searches the web\")\n* Share custom MCP servers Cline creates with others via the `~/Documents/Cline/MCP` directory\n* Displays configured MCP servers along with their tools, resources, and any error logs\n\n### Continue\n\n[Continue](https://github.com/continuedev/continue) is an open-source AI code assistant, with built-in support for all MCP features.\n\n**Key features:**\n\n* Type \"@\" to mention MCP resources\n* Prompt templates surface as slash commands\n* Use both built-in and MCP tools directly in chat\n* Supports VS Code and JetBrains IDEs, with any LLM\n\n### Copilot-MCP\n\n[Copilot-MCP](https://github.com/VikashLoomba/copilot-mcp) enables AI coding assistance via MCP.\n\n**Key features:**\n\n* Support for MCP tools and resources\n* Integration with development workflows\n* Extensible AI capabilities\n\n### Cursor\n\n[Cursor](https://docs.cursor.com/advanced/model-context-protocol) is an AI code editor.\n\n**Key features:**\n\n* Support for MCP tools in Cursor Composer\n* Support for both STDIO and SSE\n\n### Daydreams\n\n[Daydreams](https://github.com/daydreamsai/daydreams) is a generative agent framework for executing anything onchain\n\n**Key features:**\n\n* Supports MCP Servers in config\n* Exposes MCP Client\n\n### Emacs Mcp\n\n[Emacs Mcp](https://github.com/lizqwerscott/mcp.el) is an Emacs client designed to interface with MCP servers, enabling seamless connections and interactions. It provides MCP tool invocation support for AI plugins like [gptel](https://github.com/karthink/gptel) and [llm](https://github.com/ahyatt/llm), adhering to Emacs' standard tool invocation format. 
This integration enhances the functionality of AI tools within the Emacs ecosystem.\n\n**Key features:**\n\n* Provides MCP tool support for Emacs.\n\n### fast-agent\n\n[fast-agent](https://github.com/evalstate/fast-agent) is a Python Agent framework, with simple declarative support for creating Agents and Workflows, with full multi-modal support for Anthropic and OpenAI models.\n\n**Key features:**\n\n* PDF and Image support, based on MCP Native types\n* Interactive front-end to develop and diagnose Agent applications, including passthrough and playback simulators\n* Built in support for \"Building Effective Agents\" workflows.\n* Deploy Agents as MCP Servers\n\n### FLUJO\n\nThink n8n + ChatGPT. FLUJO is a desktop application that integrates with MCP to provide a workflow-builder interface for AI interactions. Built with Next.js and React, it supports both online and offline (ollama) models, it manages API Keys and environment variables centrally and can install MCP Servers from GitHub. FLUJO has a ChatCompletions endpoint and flows can be executed from other AI applications like Cline, Roo or Claude.\n\n**Key features:**\n\n* Environment & API Key Management\n* Model Management\n* MCP Server Integration\n* Workflow Orchestration\n* Chat Interface\n\n### Genkit\n\n[Genkit](https://github.com/firebase/genkit) is a cross-language SDK for building and integrating GenAI features into applications. 
The [genkitx-mcp](https://github.com/firebase/genkit/tree/main/js/plugins/mcp) plugin enables consuming MCP servers as a client or creating MCP servers from Genkit tools and prompts.\n\n**Key features:**\n\n* Client support for tools and prompts (resources partially supported)\n* Rich discovery with support in Genkit's Dev UI playground\n* Seamless interoperability with Genkit's existing tools and prompts\n* Works across a wide variety of GenAI models from top providers\n\n### Glama\n\n[Glama](https://glama.ai/chat) is a comprehensive AI workspace and integration platform that offers a unified interface to leading LLM providers, including OpenAI, Anthropic, and others. It supports the Model Context Protocol (MCP) ecosystem, enabling developers and enterprises to easily discover, build, and manage MCP servers.\n\n**Key features:**\n\n* Integrated [MCP Server Directory](https://glama.ai/mcp/servers)\n* Integrated [MCP Tool Directory](https://glama.ai/mcp/tools)\n* Host MCP servers and access them via the Chat or SSE endpoints\n  – Ability to chat with multiple LLMs and MCP servers at once\n* Upload and analyze local files and data\n* Full-text search across all your chats and data\n\n### GenAIScript\n\nProgrammatically assemble prompts for LLMs using [GenAIScript](https://microsoft.github.io/genaiscript/) (in JavaScript). 
Orchestrate LLMs, tools, and data in JavaScript.\n\n**Key features:**\n\n* JavaScript toolbox to work with prompts\n* Abstraction to make it easy and productive\n* Seamless Visual Studio Code integration\n\n### Goose\n\n[Goose](https://github.com/block/goose) is an open source AI agent that supercharges your software development by automating coding tasks.\n\n**Key features:**\n\n* Expose MCP functionality to Goose through tools.\n* MCPs can be installed directly via the [extensions directory](https://block.github.io/goose/v1/extensions/), CLI, or UI.\n* Goose allows you to extend its functionality by [building your own MCP servers](https://block.github.io/goose/docs/tutorials/custom-extensions).\n* Includes built-in tools for development, web scraping, automation, memory, and integrations with JetBrains and Google Drive.\n\n### gptme\n\n[gptme](https://github.com/gptme/gptme) is an open-source terminal-based personal AI assistant/agent, designed to assist with programming tasks and general knowledge work.\n\n**Key features:**\n\n* CLI-first design with a focus on simplicity and ease of use\n* Rich set of built-in tools for shell commands, Python execution, file operations, and web browsing\n* Local-first approach with support for multiple LLM providers\n* Open-source, built to be extensible and easy to modify\n\n### HyperAgent\n\n[HyperAgent](https://github.com/hyperbrowserai/HyperAgent) is Playwright supercharged with AI. With HyperAgent, you no longer need brittle scripts, just powerful natural language commands. 
Using MCP servers, you can extend the capability of HyperAgent, without having to write any code.\n\n**Key features:**\n\n* AI Commands: Simple APIs like page.ai(), page.extract() and executeTask() for any AI automation\n* Fallback to Regular Playwright: Use regular Playwright when AI isn't needed\n* Stealth Mode – Avoid detection with built-in anti-bot patches\n* Cloud Ready – Instantly scale to hundreds of sessions via [Hyperbrowser](https://www.hyperbrowser.ai/)\n* MCP Client – Connect to tools like Composio for full workflows (e.g. writing web data to Google Sheets)\n\n### JetBrains AI Assistant\n\n[JetBrains AI Assistant](https://plugins.jetbrains.com/plugin/22282-jetbrains-ai-assistant) plugin provides AI-powered features for software development available in all JetBrains IDEs.\n\n**Key features:**\n\n* Unlimited code completion powered by Mellum, JetBrains’ proprietary AI model.\n* Context-aware AI chat that understands your code and helps you in real time.\n* Access to top-tier models from OpenAI, Anthropic, and Google.\n* Offline mode with connected local LLMs via Ollama or LM Studio.\n* Deep integration into IDE workflows, including code suggestions in the editor, VCS assistance, runtime error explanation, and more.\n\n### Klavis AI Slack/Discord/Web\n\n[Klavis AI](https://www.klavis.ai/) is an Open-Source Infra to Use, Build & Scale MCPs with ease.\n\n**Key features:**\n\n* Slack/Discord/Web MCP clients for using MCPs directly\n* Simple web UI dashboard for easy MCP configuration\n* Direct OAuth integration with Slack & Discord Clients and MCP Servers for secure user authentication\n* SSE transport support\n* Open-source infrastructure ([GitHub repository](https://github.com/Klavis-AI/klavis))\n\n**Learn more:**\n\n* [Demo video showing MCP usage in Slack/Discord](https://youtu.be/9-QQAhrQWw8)\n\n### LibreChat\n\n[LibreChat](https://github.com/danny-avila/LibreChat) is an open-source, customizable AI chat UI that supports multiple AI providers, now 
including MCP integration.\n\n**Key features:**\n\n* Extend current tool ecosystem, including [Code Interpreter](https://www.librechat.ai/docs/features/code_interpreter) and Image generation tools, through MCP servers\n* Add tools to customizable [Agents](https://www.librechat.ai/docs/features/agents), using a variety of LLMs from top providers\n* Open-source and self-hostable, with secure multi-user support\n* Future roadmap includes expanded MCP feature support\n\n### Lutra\n\n[Lutra](https://lutra.ai) is an AI agent that transforms conversations into actionable, automated workflows.\n\n**Key features:**\n\n* Easy MCP Integration: Connecting Lutra to MCP servers is as simple as providing the server URL; Lutra handles the rest behind the scenes.\n* Chat to Take Action: Lutra understands your conversational context and goals, automatically integrating with your existing apps to perform tasks.\n* Reusable Playbooks: After completing a task, save the steps as reusable, automated workflows—simplifying repeatable processes and reducing manual effort.\n* Shareable Automations: Easily share your saved playbooks with teammates to standardize best practices and accelerate collaborative workflows.\n\n**Learn more:**\n\n* [Lutra AI agent explained](https://www.youtube.com/watch?v=W5ZpN0cMY70)\n\n### mcp-agent\n\n[mcp-agent] is a simple, composable framework to build agents using Model Context Protocol.\n\n**Key features:**\n\n* Automatic connection management of MCP servers.\n* Expose tools from multiple servers to an LLM.\n* Implements every pattern defined in [Building Effective Agents](https://www.anthropic.com/research/building-effective-agents).\n* Supports workflow pause/resume signals, such as waiting for human feedback.\n\n### mcp-use\n\n[mcp-use] is an open source python library to very easily connect any LLM to any MCP server both locally and remotely.\n\n**Key features:**\n\n* Very simple interface to connect any LLM to any MCP.\n* Support the creation of custom 
agents, workflows.\n* Supports connection to multiple MCP servers simultaneously.\n* Supports all langchain supported models, also locally.\n* Offers efficient tool orchestration and search functionalities.\n\n### MCPHub\n\n[MCPHub] is a powerful Neovim plugin that integrates MCP (Model Context Protocol) servers into your workflow.\n\n**Key features:**\n\n* Install, configure and manage MCP servers with an intuitive UI.\n* Built-in Neovim MCP server with support for file operations (read, write, search, replace), command execution, terminal integration, LSP integration, buffers, and diagnostics.\n* Create Lua-based MCP servers directly in Neovim.\n* Integrates with popular Neovim chat plugins Avante.nvim and CodeCompanion.nvim\n\n### MCPOmni-Connect\n\n[MCPOmni-Connect](https://github.com/Abiorh001/mcp_omni_connect) is a versatile command-line interface (CLI) client designed to connect to various Model Context Protocol (MCP) servers using both stdio and SSE transport.\n\n**Key features:**\n\n* Support for resources, prompts, tools, and sampling\n* Agentic mode with ReAct and orchestrator capabilities\n* Seamless integration with OpenAI models and other LLMs\n* Dynamic tool and resource management across multiple servers\n* Support for both stdio and SSE transport protocols\n* Comprehensive tool orchestration and resource analysis capabilities\n\n### Memex\n\n[Memex](https://memex.tech/) is the first MCP client and MCP server builder - all-in-one desktop app. 
Unlike traditional MCP clients that only consume existing servers, Memex can create custom MCP servers from natural language prompts, immediately integrate them into its toolkit, and use them to solve problems—all within a single conversation.\n\n**Key features:**\n\n* **Prompt-to-MCP Server**: Generate fully functional MCP servers from natural language descriptions\n* **Self-Testing & Debugging**: Autonomously test, debug, and improve created MCP servers\n* **Universal MCP Client**: Works with any MCP server through intuitive, natural language integration\n* **Curated MCP Directory**: Access to tested, one-click installable MCP servers (Neon, Netlify, GitHub, Context7, and more)\n* **Multi-Server Orchestration**: Leverage multiple MCP servers simultaneously for complex workflows\n\n**Learn more:**\n\n* [Memex Launch 2: MCP Teams and Agent API](https://memex.tech/blog/memex-launch-2-mcp-teams-and-agent-api-private-preview-125f)\n\n### Microsoft Copilot Studio\n\n[Microsoft Copilot Studio] is a robust SaaS platform designed for building custom AI-driven applications and intelligent agents, empowering developers to create, deploy, and manage sophisticated AI solutions.\n\n**Key features:**\n\n* Support for MCP tools\n* Extend Copilot Studio agents with MCP servers\n* Leveraging Microsoft's unified, governed, and secure API management solutions\n\n### MindPal\n\n[MindPal](https://mindpal.io) is a no-code platform for building and running AI agents and multi-agent workflows for business processes.\n\n**Key features:**\n\n* Build custom AI agents with no-code\n* Connect any SSE MCP server to extend agent tools\n* Create multi-agent workflows for complex business processes\n* User-friendly for both technical and non-technical professionals\n* Ongoing development with continuous improvement of MCP support\n\n**Learn more:**\n\n* [MindPal MCP Documentation](https://docs.mindpal.io/agent/mcp)\n\n### MooPoint\n\n[MooPoint](https://moopoint.io)\n\nMooPoint is a web-based AI 
chat platform built for developers and advanced users, letting you interact with multiple large language models (LLMs) through a single, unified interface. Connect your own API keys (OpenAI, Anthropic, and more) and securely manage custom MCP server integrations.\n\n**Key features:**\n\n* Accessible from any PC or smartphone—no installation required\n* Choose your preferred LLM provider\n* Supports `SSE`, `Streamable HTTP`, `npx`, and `uvx` MCP servers\n* OAuth and sampling support\n* New features added daily\n\n### Msty Studio\n\n[Msty Studio](https://msty.ai) is a privacy-first AI productivity platform that seamlessly integrates local and online language models (LLMs) into customizable workflows. Designed for both technical and non-technical users, Msty Studio offers a suite of tools to enhance AI interactions, automate tasks, and maintain full control over data and model behavior.\n\n**Key features:**\n\n* **Toolbox & Toolsets**: Connect AI models to local tools and scripts using MCP-compliant configurations. 
Group tools into Toolsets to enable dynamic, multi-step workflows within conversations.\n* **Turnstiles**: Create automated, multi-step AI interactions, allowing for complex data processing and decision-making flows.\n* **Real-Time Data Integration**: Enhance AI responses with up-to-date information by integrating real-time web search capabilities.\n* **Split Chats & Branching**: Engage in parallel conversations with multiple models simultaneously, enabling comparative analysis and diverse perspectives.\n\n**Learn more:**\n\n* [Msty Studio Documentation](https://docs.msty.studio/features/toolbox/tools)\n\n### NVIDIA Agent Intelligence (AIQ) toolkit\n\n[NVIDIA Agent Intelligence (AIQ) toolkit](https://github.com/NVIDIA/AIQToolkit) is a flexible, lightweight, and unifying library that allows you to easily connect existing enterprise agents to data sources and tools across any framework.\n\n**Key features:**\n\n* Acts as an MCP **client** to consume remote tools\n* Acts as an MCP **server** to expose tools\n* Framework agnostic and compatible with LangChain, CrewAI, Semantic Kernel, and custom agents\n* Includes built-in observability and evaluation tools\n\n**Learn more:**\n\n* [AIQ toolkit GitHub repository](https://github.com/NVIDIA/AIQToolkit)\n* [AIQ toolkit MCP documentation](https://docs.nvidia.com/aiqtoolkit/latest/workflows/mcp/index.html)\n\n### OpenSumi\n\n[OpenSumi](https://github.com/opensumi/core) is a framework that helps you quickly build AI Native IDE products.\n\n**Key features:**\n\n* Supports MCP tools in OpenSumi\n* Supports built-in IDE MCP servers and custom MCP servers\n\n### oterm\n\n[oterm] is a terminal client for Ollama allowing users to create chats/agents.\n\n**Key features:**\n\n* Support for multiple fully customizable chat sessions with Ollama connected with tools.\n* Support for MCP tools.\n\n### Roo Code\n\n[Roo Code](https://roocode.com) enables AI coding assistance via MCP.\n\n**Key features:**\n\n* Support for MCP tools and 
resources\n* Integration with development workflows\n* Extensible AI capabilities\n\n### Postman\n\n[Postman](https://postman.com/downloads) is the most popular API client and now supports MCP server testing and debugging.\n\n**Key features:**\n\n* Full support of all major MCP features (tools, prompts, resources, and subscriptions)\n* Fast, seamless UI for debugging MCP capabilities\n* MCP config integration (Claude, VSCode, etc.) for fast first-time experience in testing MCPs\n* Integration with history, variables, and collections for reuse and collaboration\n\n### Slack MCP Client\n\n[Slack MCP Client](https://github.com/tuannvm/slack-mcp-client) acts as a bridge between Slack and Model Context Protocol (MCP) servers. Using Slack as the interface, it enables large language models (LLMs) to connect and interact with various MCP servers through standardized MCP tools.\n\n**Key features:**\n\n* **Supports Popular LLM Providers:** Integrates seamlessly with leading large language model providers such as OpenAI, Anthropic, and Ollama, allowing users to leverage advanced conversational AI and orchestration capabilities within Slack.\n* **Dynamic and Secure Integration:** Supports dynamic registration of MCP tools, works in both channels and direct messages and manages credentials securely via environment variables or Kubernetes secrets.\n* **Easy Deployment and Extensibility:** Offers official Docker images, a Helm chart for Kubernetes, and Docker Compose for local development, making it simple to deploy, configure, and extend with additional MCP servers or tools.\n\n### Sourcegraph Cody\n\n[Cody](https://openctx.org/docs/providers/modelcontextprotocol) is Sourcegraph's AI coding assistant, which implements MCP through OpenCTX.\n\n**Key features:**\n\n* Support for MCP resources\n* Integration with Sourcegraph's code intelligence\n* Uses OpenCTX as an abstraction layer\n* Future support planned for additional MCP features\n\n### SpinAI\n\n[SpinAI](https://spinai.dev) 
is an open-source TypeScript framework for building observable AI agents. The framework provides native MCP compatibility, allowing agents to seamlessly integrate with MCP servers and tools.\n\n**Key features:**\n\n* Built-in MCP compatibility for AI agents\n* Open-source TypeScript framework\n* Observable agent architecture\n* Native support for MCP tools integration\n\n### Superinterface\n\n[Superinterface](https://superinterface.ai) is AI infrastructure and a developer platform to build in-app AI assistants with support for MCP, interactive components, client-side function calling and more.\n\n**Key features:**\n\n* Use tools from MCP servers in assistants embedded via React components or script tags\n* SSE transport support\n* Use any AI model from any AI provider (OpenAI, Anthropic, Ollama, others)\n\n### Superjoin\n\n[Superjoin](https://superjoin.ai) brings the power of MCP directly into Google Sheets extension. With Superjoin, users can access and invoke MCP tools and agents without leaving their spreadsheets, enabling powerful AI workflows and automation right where their data lives.\n\n**Key features:**\n\n* Native Google Sheets add-on providing effortless access to MCP capabilities\n* Supports OAuth 2.1 and header-based authentication for secure and flexible connections\n* Compatible with both SSE and Streamable HTTP transport for efficient, real-time streaming communication\n* Fully web-based, cross-platform client requiring no additional software installation\n\n### TheiaAI/TheiaIDE\n\n[Theia AI](https://eclipsesource.com/blogs/2024/10/07/introducing-theia-ai/) is a framework for building AI-enhanced tools and IDEs. 
The [AI-powered Theia IDE](https://eclipsesource.com/blogs/2024/10/08/introducting-ai-theia-ide/) is an open and flexible development environment built on Theia AI.\n\n**Key features:**\n\n* **Tool Integration**: Theia AI enables AI agents, including those in the Theia IDE, to utilize MCP servers for seamless tool interaction.\n* **Customizable Prompts**: The Theia IDE allows users to define and adapt prompts, dynamically integrating MCP servers for tailored workflows.\n* **Custom agents**: The Theia IDE supports creating custom agents that leverage MCP capabilities, enabling users to design dedicated workflows on the fly.\n\nTheia AI and Theia IDE's MCP integration provide users with flexibility, making them powerful platforms for exploring and adapting MCP.\n\n**Learn more:**\n\n* [Theia IDE and Theia AI MCP Announcement](https://eclipsesource.com/blogs/2024/12/19/theia-ide-and-theia-ai-support-mcp/)\n* [Download the AI-powered Theia IDE](https://theia-ide.org/)\n\n### Tome\n\n[Tome](https://github.com/runebookai/tome) is an open source cross-platform desktop app designed for working with local LLMs and MCP servers. It is designed to be beginner friendly and abstract away the nitty gritty of configuration for people getting started with MCP.\n\n**Key features:**\n\n* MCP servers are managed by Tome so there is no need to install uv or npm or configure JSON\n* Users can quickly add or remove MCP servers via UI\n* Any tool-supported local model on Ollama is compatible\n\n### TypingMind App\n\n[TypingMind](https://www.typingmind.com) is an advanced frontend for LLMs with MCP support. 
TypingMind supports all popular LLM providers like OpenAI, Gemini, Claude, and users can use with their own API keys.\n\n**Key features:**\n\n* **MCP Tool Integration**: Once MCP is configured, MCP tools will show up as plugins that can be enabled/disabled easily via the main app interface.\n* **Assign MCP Tools to Agents**: TypingMind allows users to create AI agents that have a set of MCP servers assigned.\n* **Remote MCP servers**: Allows users to customize where to run the MCP servers via its MCP Connector configuration, allowing the use of MCP tools across multiple devices (laptop, mobile devices, etc.) or control MCP servers from a remote private server.\n\n**Learn more:**\n\n* [TypingMind MCP Document](https://www.typingmind.com/mcp)\n* [Download TypingMind (PWA)](https://www.typingmind.com/)\n\n### VS Code GitHub Copilot\n\n[VS Code](https://code.visualstudio.com/) integrates MCP with GitHub Copilot through [agent mode](https://code.visualstudio.com/docs/copilot/chat/chat-agent-mode), allowing direct interaction with MCP-provided tools within your agentic coding workflow. Configure servers in Claude Desktop, workspace or user settings, with guided MCP installation and secure handling of keys in input variables to avoid leaking hard-coded keys.\n\n**Key features:**\n\n* Support for stdio and server-sent events (SSE) transport\n* Per-session selection of tools per agent session for optimal performance\n* Easy server debugging with restart commands and output logging\n* Tool calls with editable inputs and always-allow toggle\n* Integration with existing VS Code extension system to register MCP servers from extensions\n\n### Warp\n\n[Warp](https://www.warp.dev/) is the intelligent terminal with AI and your dev team's knowledge built-in. 
With natural language capabilities integrated directly into an agentic command line, Warp enables developers to code, automate, and collaborate more efficiently -- all within a terminal that features a modern UX.\n\n**Key features:**\n\n* **Agent Mode with MCP support**: invoke tools and access data from MCP servers using natural language prompts\n* **Flexible server management**: add and manage CLI or SSE-based MCP servers via Warp's built-in UI\n* **Live tool/resource discovery**: view tools and resources from each running MCP server\n* **Configurable startup**: set MCP servers to start automatically with Warp or launch them manually as needed\n\n### WhatsMCP\n\n[WhatsMCP](https://wassist.app/mcp/) is an MCP client for WhatsApp. WhatsMCP lets you interact with your AI stack from the comfort of a WhatsApp chat.\n\n**Key features:**\n\n* Supports MCP tools\n* SSE transport, full OAuth2 support\n* Chat flow management for WhatsApp messages\n* One click setup for connecting to your MCP servers\n* In chat management of MCP servers\n* OAuth flow natively supported in WhatsApp\n\n### Windsurf Editor\n\n[Windsurf Editor](https://codeium.com/windsurf) is an agentic IDE that combines AI assistance with developer workflows. 
It features an innovative AI Flow system that enables both collaborative and independent AI interactions while maintaining developer control.\n\n**Key features:**\n\n* Revolutionary AI Flow paradigm for human-AI collaboration\n* Intelligent code generation and understanding\n* Rich development tools with multi-model support\n\n### Witsy\n\n[Witsy](https://github.com/nbonamy/witsy) is an AI desktop assistant, supporting Anthropic models and MCP servers as LLM tools.\n\n**Key features:**\n\n* Multiple MCP servers support\n* Tool integration for executing commands and scripts\n* Local server connections for enhanced privacy and security\n* Easy-install from Smithery.ai\n* Open-source, available for macOS, Windows and Linux\n\n### Zed\n\n[Zed](https://zed.dev/docs/assistant/model-context-protocol) is a high-performance code editor with built-in MCP support, focusing on prompt templates and tool integration.\n\n**Key features:**\n\n* Prompt templates surface as slash commands in the editor\n* Tool integration for enhanced coding workflows\n* Tight integration with editor features and workspace context\n* Does not support MCP resources\n\n### Zencoder\n\n[Zencoder](https://zencoder.ai) is a coding agent that's available as an extension for VS Code and JetBrains family of IDEs, meeting developers where they already work. 
It comes with RepoGrokking (deep contextual codebase understanding), agentic pipeline, and the ability to create and share custom agents.\n\n**Key features:**\n\n* RepoGrokking - deep contextual understanding of codebases\n* Agentic pipeline - runs, tests, and executes code before outputting it\n* Zen Agents platform - ability to build and create custom agents and share with the team\n* Integrated MCP tool library with one-click installations\n* Specialized agents for Unit and E2E Testing\n\n**Learn more:**\n\n* [Zencoder Documentation](https://docs.zencoder.ai)\n\n## Adding MCP support to your application\n\nIf you've added MCP support to your application, we encourage you to submit a pull request to add it to this list. MCP integration can provide your users with powerful contextual AI capabilities and make your application part of the growing MCP ecosystem.\n\nBenefits of adding MCP support:\n\n* Enable users to bring their own context and tools\n* Join a growing ecosystem of interoperable AI applications\n* Provide users with flexible integration options\n* Support local-first AI workflows\n\nTo get started with implementing MCP in your application, check out our [Python](https://github.com/modelcontextprotocol/python-sdk) or [TypeScript SDK Documentation](https://github.com/modelcontextprotocol/typescript-sdk)\n\n## Updates and corrections\n\nThis list is maintained by the community. If you notice any inaccuracies or would like to update information about MCP support in your application, please submit a pull request or [open an issue in our documentation repository](https://github.com/modelcontextprotocol/modelcontextprotocol/issues).\n\n\n# Contributing\nSource: https://modelcontextprotocol.io/development/contributing\n\nHow to participate in Model Context Protocol development\n\nWe welcome contributions from the community! 
Please review our [contributing guidelines](https://github.com/modelcontextprotocol/modelcontextprotocol/blob/main/CONTRIBUTING.md) for details on how to submit changes.\n\nAll contributors must adhere to our [Code of Conduct](https://github.com/modelcontextprotocol/modelcontextprotocol/blob/main/CODE_OF_CONDUCT.md).\n\nFor questions and discussions, please use [GitHub Discussions](https://github.com/modelcontextprotocol/modelcontextprotocol/discussions).\n\n\n# Roadmap\nSource: https://modelcontextprotocol.io/development/roadmap\n\nOur plans for evolving Model Context Protocol\n\n<Info>Last updated: **2025-03-27**</Info>\n\nThe Model Context Protocol is rapidly evolving. This page outlines our current thinking on key priorities and direction for approximately **the next six months**, though these may change significantly as the project develops. To see what's changed recently, check out the **[specification changelog](/specification/2025-06-18/changelog/)**.\n\n<Note>\n  The ideas presented here are not commitments—we may solve these challenges differently than described, or some may not materialize at all. This is also not an *exhaustive* list; we may incorporate work that isn't mentioned here.\n</Note>\n\nWe value community participation! 
Each section links to relevant discussions where you can learn more and contribute your thoughts.\n\nFor a technical view of our standardization process, visit the [Standards Track](https://github.com/orgs/modelcontextprotocol/projects/2/views/2) on GitHub, which tracks how proposals progress toward inclusion in the official [MCP specification](https://spec.modelcontextprotocol.io).\n\n## Validation\n\nTo foster a robust developer ecosystem, we plan to invest in:\n\n* **Reference Client Implementations**: demonstrating protocol features with high-quality AI applications\n* **Compliance Test Suites**: automated verification that clients, servers, and SDKs properly implement the specification\n\nThese tools will help developers confidently implement MCP while ensuring consistent behavior across the ecosystem.\n\n## Registry\n\nFor MCP to reach its full potential, we need streamlined ways to distribute and discover MCP servers.\n\nWe plan to develop an [**MCP Registry**](https://github.com/orgs/modelcontextprotocol/discussions/159) that will enable centralized server discovery and metadata. 
This registry will primarily function as an API layer that third-party marketplaces and discovery services can build upon.\n\n## Agents\n\nAs MCP increasingly becomes part of agentic workflows, we're exploring [improvements](https://github.com/modelcontextprotocol/specification/discussions/111) such as:\n\n* **[Agent Graphs](https://github.com/modelcontextprotocol/specification/discussions/94)**: enabling complex agent topologies through namespacing and graph-aware communication patterns\n* **Interactive Workflows**: improving human-in-the-loop experiences with granular permissioning, standardized interaction patterns, and [ways to directly communicate](https://github.com/modelcontextprotocol/specification/issues/97) with the end user\n\n## Multimodality\n\nSupporting the full spectrum of AI capabilities in MCP, including:\n\n* **Additional Modalities**: video and other media types\n* **[Streaming](https://github.com/modelcontextprotocol/specification/issues/117)**: multipart, chunked messages, and bidirectional communication for interactive experiences\n\n## Governance\n\nWe're implementing governance structures that prioritize:\n\n* **Community-Led Development**: fostering a collaborative ecosystem where community members and AI developers can all participate in MCP's evolution, ensuring it serves diverse applications and use cases\n* **Transparent Standardization**: establishing clear processes for contributing to the specification, while exploring formal standardization via industry bodies\n\n## Get Involved\n\nWe welcome your contributions to MCP's future! 
Join our [GitHub Discussions](https://github.com/orgs/modelcontextprotocol/discussions) to share ideas, provide feedback, or participate in the development process.\n\n\n# Core architecture\nSource: https://modelcontextprotocol.io/docs/concepts/architecture\n\nUnderstand how MCP connects clients, servers, and LLMs\n\nThe Model Context Protocol (MCP) is built on a flexible, extensible architecture that enables seamless communication between LLM applications and integrations. This document covers the core architectural components and concepts.\n\n## Overview\n\nMCP follows a client-server architecture where:\n\n* **Hosts** are LLM applications (like Claude Desktop or IDEs) that initiate connections\n* **Clients** maintain 1:1 connections with servers, inside the host application\n* **Servers** provide context, tools, and prompts to clients\n\n```mermaid\nflowchart LR\n    subgraph \"Host\"\n        client1[MCP Client]\n        client2[MCP Client]\n    end\n    subgraph \"Server Process\"\n        server1[MCP Server]\n    end\n    subgraph \"Server Process\"\n        server2[MCP Server]\n    end\n\n    client1 <-->|Transport Layer| server1\n    client2 <-->|Transport Layer| server2\n```\n\n## Core components\n\n### Protocol layer\n\nThe protocol layer handles message framing, request/response linking, and high-level communication patterns.\n\n<Tabs>\n  <Tab title=\"TypeScript\">\n    ```typescript\n    class Protocol<Request, Notification, Result> {\n        // Handle incoming requests\n        setRequestHandler<T>(schema: T, handler: (request: T, extra: RequestHandlerExtra) => Promise<Result>): void\n\n        // Handle incoming notifications\n        setNotificationHandler<T>(schema: T, handler: (notification: T) => Promise<void>): void\n\n        // Send requests and await responses\n        request<T>(request: Request, schema: T, options?: RequestOptions): Promise<T>\n\n        // Send one-way notifications\n        notification(notification: Notification): 
Promise<void>\n    }\n    ```\n  </Tab>\n\n  <Tab title=\"Python\">\n    ```python\n    class Session(BaseSession[RequestT, NotificationT, ResultT]):\n        async def send_request(\n            self,\n            request: RequestT,\n            result_type: type[Result]\n        ) -> Result:\n            \"\"\"Send request and wait for response. Raises McpError if response contains error.\"\"\"\n            # Request handling implementation\n\n        async def send_notification(\n            self,\n            notification: NotificationT\n        ) -> None:\n            \"\"\"Send one-way notification that doesn't expect response.\"\"\"\n            # Notification handling implementation\n\n        async def _received_request(\n            self,\n            responder: RequestResponder[ReceiveRequestT, ResultT]\n        ) -> None:\n            \"\"\"Handle incoming request from other side.\"\"\"\n            # Request handling implementation\n\n        async def _received_notification(\n            self,\n            notification: ReceiveNotificationT\n        ) -> None:\n            \"\"\"Handle incoming notification from other side.\"\"\"\n            # Notification handling implementation\n    ```\n  </Tab>\n</Tabs>\n\nKey classes include:\n\n* `Protocol`\n* `Client`\n* `Server`\n\n### Transport layer\n\nThe transport layer handles the actual communication between clients and servers. MCP supports multiple transport mechanisms:\n\n1. **Stdio transport**\n\n   * Uses standard input/output for communication\n   * Ideal for local processes\n\n2. **Streamable HTTP transport**\n   * Uses HTTP with optional Server-Sent Events for streaming\n   * HTTP POST for client-to-server messages\n\nAll transports use [JSON-RPC](https://www.jsonrpc.org/) 2.0 to exchange messages. See the [specification](/specification/) for detailed information about the Model Context Protocol message format.\n\n### Message types\n\nMCP has these main types of messages:\n\n1. 
**Requests** expect a response from the other side:\n\n   ```typescript\n   interface Request {\n     method: string;\n     params?: { ... };\n   }\n   ```\n\n2. **Results** are successful responses to requests:\n\n   ```typescript\n   interface Result {\n     [key: string]: unknown;\n   }\n   ```\n\n3. **Errors** indicate that a request failed:\n\n   ```typescript\n   interface Error {\n     code: number;\n     message: string;\n     data?: unknown;\n   }\n   ```\n\n4. **Notifications** are one-way messages that don't expect a response:\n   ```typescript\n   interface Notification {\n     method: string;\n     params?: { ... };\n   }\n   ```\n\n## Connection lifecycle\n\n### 1. Initialization\n\n```mermaid\nsequenceDiagram\n    participant Client\n    participant Server\n\n    Client->>Server: initialize request\n    Server->>Client: initialize response\n    Client->>Server: initialized notification\n\n    Note over Client,Server: Connection ready for use\n```\n\n1. Client sends `initialize` request with protocol version and capabilities\n2. Server responds with its protocol version and capabilities\n3. Client sends `initialized` notification as acknowledgment\n4. Normal message exchange begins\n\n### 2. Message exchange\n\nAfter initialization, the following patterns are supported:\n\n* **Request-Response**: Client or server sends requests, the other responds\n* **Notifications**: Either party sends one-way messages\n\n### 3. 
Termination\n\nEither party can terminate the connection:\n\n* Clean shutdown via `close()`\n* Transport disconnection\n* Error conditions\n\n## Error handling\n\nMCP defines these standard error codes:\n\n```typescript\nenum ErrorCode {\n  // Standard JSON-RPC error codes\n  ParseError = -32700,\n  InvalidRequest = -32600,\n  MethodNotFound = -32601,\n  InvalidParams = -32602,\n  InternalError = -32603,\n}\n```\n\nSDKs and applications can define their own error codes above -32000.\n\nErrors are propagated through:\n\n* Error responses to requests\n* Error events on transports\n* Protocol-level error handlers\n\n## Implementation example\n\nHere's a basic example of implementing an MCP server:\n\n<Tabs>\n  <Tab title=\"TypeScript\">\n    ```typescript\n    import { Server } from \"@modelcontextprotocol/sdk/server/index.js\";\n    import { StdioServerTransport } from \"@modelcontextprotocol/sdk/server/stdio.js\";\n\n    const server = new Server({\n      name: \"example-server\",\n      version: \"1.0.0\"\n    }, {\n      capabilities: {\n        resources: {}\n      }\n    });\n\n    // Handle requests\n    server.setRequestHandler(ListResourcesRequestSchema, async () => {\n      return {\n        resources: [\n          {\n            uri: \"example://resource\",\n            name: \"Example Resource\"\n          }\n        ]\n      };\n    });\n\n    // Connect transport\n    const transport = new StdioServerTransport();\n    await server.connect(transport);\n    ```\n  </Tab>\n\n  <Tab title=\"Python\">\n    ```python\n    import asyncio\n    import mcp.types as types\n    from mcp.server import Server\n    from mcp.server.stdio import stdio_server\n\n    app = Server(\"example-server\")\n\n    @app.list_resources()\n    async def list_resources() -> list[types.Resource]:\n        return [\n            types.Resource(\n                uri=\"example://resource\",\n                name=\"Example Resource\"\n            )\n        ]\n\n    async def main():\n      
  async with stdio_server() as streams:\n            await app.run(\n                streams[0],\n                streams[1],\n                app.create_initialization_options()\n            )\n\n    if __name__ == \"__main__\":\n        asyncio.run(main())\n    ```\n  </Tab>\n</Tabs>\n\n## Best practices\n\n### Transport selection\n\n1. **Local communication**\n\n   * Use stdio transport for local processes\n   * Efficient for same-machine communication\n   * Simple process management\n\n2. **Remote communication**\n   * Use Streamable HTTP for scenarios requiring HTTP compatibility\n   * Consider security implications including authentication and authorization\n\n### Message handling\n\n1. **Request processing**\n\n   * Validate inputs thoroughly\n   * Use type-safe schemas\n   * Handle errors gracefully\n   * Implement timeouts\n\n2. **Progress reporting**\n\n   * Use progress tokens for long operations\n   * Report progress incrementally\n   * Include total progress when known\n\n3. **Error management**\n   * Use appropriate error codes\n   * Include helpful error messages\n   * Clean up resources on errors\n\n## Security considerations\n\n1. **Transport security**\n\n   * Use TLS for remote connections\n   * Validate connection origins\n   * Implement authentication when needed\n\n2. **Message validation**\n\n   * Validate all incoming messages\n   * Sanitize inputs\n   * Check message size limits\n   * Verify JSON-RPC format\n\n3. **Resource protection**\n\n   * Implement access controls\n   * Validate resource paths\n   * Monitor resource usage\n   * Rate limit requests\n\n4. **Error handling**\n   * Don't leak sensitive information\n   * Log security-relevant errors\n   * Implement proper cleanup\n   * Handle DoS scenarios\n\n## Debugging and monitoring\n\n1. **Logging**\n\n   * Log protocol events\n   * Track message flow\n   * Monitor performance\n   * Record errors\n\n2. 
**Diagnostics**\n\n   * Implement health checks\n   * Monitor connection state\n   * Track resource usage\n   * Profile performance\n\n3. **Testing**\n   * Test different transports\n   * Verify error handling\n   * Check edge cases\n   * Load test servers\n\n\n# Prompts\nSource: https://modelcontextprotocol.io/docs/concepts/prompts\n\nCreate reusable prompt templates and workflows\n\nPrompts enable servers to define reusable prompt templates and workflows that clients can easily surface to users and LLMs. They provide a powerful way to standardize and share common LLM interactions.\n\n<Note>\n  Prompts are designed to be **user-controlled**, meaning they are exposed from servers to clients with the intention of the user being able to explicitly select them for use.\n</Note>\n\n## Overview\n\nPrompts in MCP are predefined templates that can:\n\n* Accept dynamic arguments\n* Include context from resources\n* Chain multiple interactions\n* Guide specific workflows\n* Surface as UI elements (like slash commands)\n\n## Prompt structure\n\nEach prompt is defined with:\n\n```typescript\n{\n  name: string;              // Unique identifier for the prompt\n  description?: string;      // Human-readable description\n  arguments?: [              // Optional list of arguments\n    {\n      name: string;          // Argument identifier\n      description?: string;  // Argument description\n      required?: boolean;    // Whether argument is required\n    }\n  ]\n}\n```\n\n## Discovering prompts\n\nClients can discover available prompts by sending a `prompts/list` request:\n\n```typescript\n// Request\n{\n  method: \"prompts/list\";\n}\n\n// Response\n{\n  prompts: [\n    {\n      name: \"analyze-code\",\n      description: \"Analyze code for potential improvements\",\n      arguments: [\n        {\n          name: \"language\",\n          description: \"Programming language\",\n          required: true,\n        },\n      ],\n    },\n  ];\n}\n```\n\n## Using prompts\n\nTo use 
a prompt, clients make a `prompts/get` request:\n\n````typescript\n// Request\n{\n  method: \"prompts/get\",\n  params: {\n    name: \"analyze-code\",\n    arguments: {\n      language: \"python\"\n    }\n  }\n}\n\n// Response\n{\n  description: \"Analyze Python code for potential improvements\",\n  messages: [\n    {\n      role: \"user\",\n      content: {\n        type: \"text\",\n        text: \"Please analyze the following Python code for potential improvements:\\n\\n```python\\ndef calculate_sum(numbers):\\n    total = 0\\n    for num in numbers:\\n        total = total + num\\n    return total\\n\\nresult = calculate_sum([1, 2, 3, 4, 5])\\nprint(result)\\n```\"\n      }\n    }\n  ]\n}\n````\n\n## Dynamic prompts\n\nPrompts can be dynamic and include:\n\n### Embedded resource context\n\n```json\n{\n  \"name\": \"analyze-project\",\n  \"description\": \"Analyze project logs and code\",\n  \"arguments\": [\n    {\n      \"name\": \"timeframe\",\n      \"description\": \"Time period to analyze logs\",\n      \"required\": true\n    },\n    {\n      \"name\": \"fileUri\",\n      \"description\": \"URI of code file to review\",\n      \"required\": true\n    }\n  ]\n}\n```\n\nWhen handling the `prompts/get` request:\n\n```json\n{\n  \"messages\": [\n    {\n      \"role\": \"user\",\n      \"content\": {\n        \"type\": \"text\",\n        \"text\": \"Analyze these system logs and the code file for any issues:\"\n      }\n    },\n    {\n      \"role\": \"user\",\n      \"content\": {\n        \"type\": \"resource\",\n        \"resource\": {\n          \"uri\": \"logs://recent?timeframe=1h\",\n          \"text\": \"[2024-03-14 15:32:11] ERROR: Connection timeout in network.py:127\\n[2024-03-14 15:32:15] WARN: Retrying connection (attempt 2/3)\\n[2024-03-14 15:32:20] ERROR: Max retries exceeded\",\n          \"mimeType\": \"text/plain\"\n        }\n      }\n    },\n    {\n      \"role\": \"user\",\n      \"content\": {\n        \"type\": \"resource\",\n        
\"resource\": {\n          \"uri\": \"file:///path/to/code.py\",\n          \"text\": \"def connect_to_service(timeout=30):\\n    retries = 3\\n    for attempt in range(retries):\\n        try:\\n            return establish_connection(timeout)\\n        except TimeoutError:\\n            if attempt == retries - 1:\\n                raise\\n            time.sleep(5)\\n\\ndef establish_connection(timeout):\\n    # Connection implementation\\n    pass\",\n          \"mimeType\": \"text/x-python\"\n        }\n      }\n    }\n  ]\n}\n```\n\n### Multi-step workflows\n\n```typescript\nconst debugWorkflow = {\n  name: \"debug-error\",\n  async getMessages(error: string) {\n    return [\n      {\n        role: \"user\",\n        content: {\n          type: \"text\",\n          text: `Here's an error I'm seeing: ${error}`,\n        },\n      },\n      {\n        role: \"assistant\",\n        content: {\n          type: \"text\",\n          text: \"I'll help analyze this error. What have you tried so far?\",\n        },\n      },\n      {\n        role: \"user\",\n        content: {\n          type: \"text\",\n          text: \"I've tried restarting the service, but the error persists.\",\n        },\n      },\n    ];\n  },\n};\n```\n\n## Example implementation\n\nHere's a complete example of implementing prompts in an MCP server:\n\n<Tabs>\n  <Tab title=\"TypeScript\">\n    ```typescript\n    import { Server } from \"@modelcontextprotocol/sdk/server\";\n    import {\n      ListPromptsRequestSchema,\n      GetPromptRequestSchema\n    } from \"@modelcontextprotocol/sdk/types\";\n\n    const PROMPTS = {\n      \"git-commit\": {\n        name: \"git-commit\",\n        description: \"Generate a Git commit message\",\n        arguments: [\n          {\n            name: \"changes\",\n            description: \"Git diff or description of changes\",\n            required: true\n          }\n        ]\n      },\n      \"explain-code\": {\n        name: \"explain-code\",\n        
description: \"Explain how code works\",\n        arguments: [\n          {\n            name: \"code\",\n            description: \"Code to explain\",\n            required: true\n          },\n          {\n            name: \"language\",\n            description: \"Programming language\",\n            required: false\n          }\n        ]\n      }\n    };\n\n    const server = new Server({\n      name: \"example-prompts-server\",\n      version: \"1.0.0\"\n    }, {\n      capabilities: {\n        prompts: {}\n      }\n    });\n\n    // List available prompts\n    server.setRequestHandler(ListPromptsRequestSchema, async () => {\n      return {\n        prompts: Object.values(PROMPTS)\n      };\n    });\n\n    // Get specific prompt\n    server.setRequestHandler(GetPromptRequestSchema, async (request) => {\n      const prompt = PROMPTS[request.params.name];\n      if (!prompt) {\n        throw new Error(`Prompt not found: ${request.params.name}`);\n      }\n\n      if (request.params.name === \"git-commit\") {\n        return {\n          messages: [\n            {\n              role: \"user\",\n              content: {\n                type: \"text\",\n                text: `Generate a concise but descriptive commit message for these changes:\\n\\n${request.params.arguments?.changes}`\n              }\n            }\n          ]\n        };\n      }\n\n      if (request.params.name === \"explain-code\") {\n        const language = request.params.arguments?.language || \"Unknown\";\n        return {\n          messages: [\n            {\n              role: \"user\",\n              content: {\n                type: \"text\",\n                text: `Explain how this ${language} code works:\\n\\n${request.params.arguments?.code}`\n              }\n            }\n          ]\n        };\n      }\n\n      throw new Error(\"Prompt implementation not found\");\n    });\n    ```\n  </Tab>\n\n  <Tab title=\"Python\">\n    ```python\n    from mcp.server import Server\n   
 import mcp.types as types\n\n    # Define available prompts\n    PROMPTS = {\n        \"git-commit\": types.Prompt(\n            name=\"git-commit\",\n            description=\"Generate a Git commit message\",\n            arguments=[\n                types.PromptArgument(\n                    name=\"changes\",\n                    description=\"Git diff or description of changes\",\n                    required=True\n                )\n            ],\n        ),\n        \"explain-code\": types.Prompt(\n            name=\"explain-code\",\n            description=\"Explain how code works\",\n            arguments=[\n                types.PromptArgument(\n                    name=\"code\",\n                    description=\"Code to explain\",\n                    required=True\n                ),\n                types.PromptArgument(\n                    name=\"language\",\n                    description=\"Programming language\",\n                    required=False\n                )\n            ],\n        )\n    }\n\n    # Initialize server\n    app = Server(\"example-prompts-server\")\n\n    @app.list_prompts()\n    async def list_prompts() -> list[types.Prompt]:\n        return list(PROMPTS.values())\n\n    @app.get_prompt()\n    async def get_prompt(\n        name: str, arguments: dict[str, str] | None = None\n    ) -> types.GetPromptResult:\n        if name not in PROMPTS:\n            raise ValueError(f\"Prompt not found: {name}\")\n\n        if name == \"git-commit\":\n            changes = arguments.get(\"changes\") if arguments else \"\"\n            return types.GetPromptResult(\n                messages=[\n                    types.PromptMessage(\n                        role=\"user\",\n                        content=types.TextContent(\n                            type=\"text\",\n                            text=f\"Generate a concise but descriptive commit message \"\n                            f\"for these changes:\\n\\n{changes}\"\n               
         )\n                    )\n                ]\n            )\n\n        if name == \"explain-code\":\n            code = arguments.get(\"code\") if arguments else \"\"\n            language = arguments.get(\"language\", \"Unknown\") if arguments else \"Unknown\"\n            return types.GetPromptResult(\n                messages=[\n                    types.PromptMessage(\n                        role=\"user\",\n                        content=types.TextContent(\n                            type=\"text\",\n                            text=f\"Explain how this {language} code works:\\n\\n{code}\"\n                        )\n                    )\n                ]\n            )\n\n        raise ValueError(\"Prompt implementation not found\")\n    ```\n  </Tab>\n</Tabs>\n\n## Best practices\n\nWhen implementing prompts:\n\n1. Use clear, descriptive prompt names\n2. Provide detailed descriptions for prompts and arguments\n3. Validate all required arguments\n4. Handle missing arguments gracefully\n5. Consider versioning for prompt templates\n6. Cache dynamic content when appropriate\n7. Implement error handling\n8. Document expected argument formats\n9. Consider prompt composability\n10. Test prompts with various inputs\n\n## UI integration\n\nPrompts can be surfaced in client UIs as:\n\n* Slash commands\n* Quick actions\n* Context menu items\n* Command palette entries\n* Guided workflows\n* Interactive forms\n\n## Updates and changes\n\nServers can notify clients about prompt changes:\n\n1. Server capability: `prompts.listChanged`\n2. Notification: `notifications/prompts/list_changed`\n3. 
Client re-fetches prompt list\n\n## Security considerations\n\nWhen implementing prompts:\n\n* Validate all arguments\n* Sanitize user input\n* Consider rate limiting\n* Implement access controls\n* Audit prompt usage\n* Handle sensitive data appropriately\n* Validate generated content\n* Implement timeouts\n* Consider prompt injection risks\n* Document security requirements\n\n\n# Resources\nSource: https://modelcontextprotocol.io/docs/concepts/resources\n\nExpose data and content from your servers to LLMs\n\nResources are a core primitive in the Model Context Protocol (MCP) that allow servers to expose data and content that can be read by clients and used as context for LLM interactions.\n\n<Note>\n  Resources are designed to be **application-controlled**, meaning that the client application can decide how and when they should be used.\n  Different MCP clients may handle resources differently. For example:\n\n  * Claude Desktop currently requires users to explicitly select resources before they can be used\n  * Other clients might automatically select resources based on heuristics\n  * Some implementations may even allow the AI model itself to determine which resources to use\n\n  Server authors should be prepared to handle any of these interaction patterns when implementing resource support. In order to expose data to models automatically, server authors should use a **model-controlled** primitive such as [Tools](./tools).\n</Note>\n\n## Overview\n\nResources represent any kind of data that an MCP server wants to make available to clients. 
This can include:\n\n* File contents\n* Database records\n* API responses\n* Live system data\n* Screenshots and images\n* Log files\n* And more\n\nEach resource is identified by a unique URI and can contain either text or binary data.\n\n## Resource URIs\n\nResources are identified using URIs that follow this format:\n\n```\n[protocol]://[host]/[path]\n```\n\nFor example:\n\n* `file:///home/user/documents/report.pdf`\n* `postgres://database/customers/schema`\n* `screen://localhost/display1`\n\nThe protocol and path structure is defined by the MCP server implementation. Servers can define their own custom URI schemes.\n\n## Resource types\n\nResources can contain two types of content:\n\n### Text resources\n\nText resources contain UTF-8 encoded text data. These are suitable for:\n\n* Source code\n* Configuration files\n* Log files\n* JSON/XML data\n* Plain text\n\n### Binary resources\n\nBinary resources contain raw binary data encoded in base64. These are suitable for:\n\n* Images\n* PDFs\n* Audio files\n* Video files\n* Other non-text formats\n\n## Resource discovery\n\nClients can discover available resources through two main methods:\n\n### Direct resources\n\nServers expose a list of resources via the `resources/list` request. 
Each resource includes:\n\n```typescript\n{\n  uri: string;           // Unique identifier for the resource\n  name: string;          // Human-readable name\n  description?: string;  // Optional description\n  mimeType?: string;     // Optional MIME type\n  size?: number;         // Optional size in bytes\n}\n```\n\n### Resource templates\n\nFor dynamic resources, servers can expose [URI templates](https://datatracker.ietf.org/doc/html/rfc6570) that clients can use to construct valid resource URIs:\n\n```typescript\n{\n  uriTemplate: string;   // URI template following RFC 6570\n  name: string;          // Human-readable name for this type\n  description?: string;  // Optional description\n  mimeType?: string;     // Optional MIME type for all matching resources\n}\n```\n\n## Reading resources\n\nTo read a resource, clients make a `resources/read` request with the resource URI.\n\nThe server responds with a list of resource contents:\n\n```typescript\n{\n  contents: [\n    {\n      uri: string;        // The URI of the resource\n      mimeType?: string;  // Optional MIME type\n\n      // One of:\n      text?: string;      // For text resources\n      blob?: string;      // For binary resources (base64 encoded)\n    }\n  ]\n}\n```\n\n<Tip>\n  Servers may return multiple resources in response to one `resources/read` request. This could be used, for example, to return a list of files inside a directory when the directory is read.\n</Tip>\n\n## Resource updates\n\nMCP supports real-time updates for resources through two mechanisms:\n\n### List changes\n\nServers can notify clients when their list of available resources changes via the `notifications/resources/list_changed` notification.\n\n### Content changes\n\nClients can subscribe to updates for specific resources:\n\n1. Client sends `resources/subscribe` with resource URI\n2. Server sends `notifications/resources/updated` when the resource changes\n3. Client can fetch latest content with `resources/read`\n4. 
Client can unsubscribe with `resources/unsubscribe`\n\n## Example implementation\n\nHere's a simple example of implementing resource support in an MCP server:\n\n<Tabs>\n  <Tab title=\"TypeScript\">\n    ```typescript\n    const server = new Server({\n      name: \"example-server\",\n      version: \"1.0.0\"\n    }, {\n      capabilities: {\n        resources: {}\n      }\n    });\n\n    // List available resources\n    server.setRequestHandler(ListResourcesRequestSchema, async () => {\n      return {\n        resources: [\n          {\n            uri: \"file:///logs/app.log\",\n            name: \"Application Logs\",\n            mimeType: \"text/plain\"\n          }\n        ]\n      };\n    });\n\n    // Read resource contents\n    server.setRequestHandler(ReadResourceRequestSchema, async (request) => {\n      const uri = request.params.uri;\n\n      if (uri === \"file:///logs/app.log\") {\n        const logContents = await readLogFile();\n        return {\n          contents: [\n            {\n              uri,\n              mimeType: \"text/plain\",\n              text: logContents\n            }\n          ]\n        };\n      }\n\n      throw new Error(\"Resource not found\");\n    });\n    ```\n  </Tab>\n\n  <Tab title=\"Python\">\n    ```python\n    app = Server(\"example-server\")\n\n    @app.list_resources()\n    async def list_resources() -> list[types.Resource]:\n        return [\n            types.Resource(\n                uri=\"file:///logs/app.log\",\n                name=\"Application Logs\",\n                mimeType=\"text/plain\"\n            )\n        ]\n\n    @app.read_resource()\n    async def read_resource(uri: AnyUrl) -> str:\n        if str(uri) == \"file:///logs/app.log\":\n            log_contents = await read_log_file()\n            return log_contents\n\n        raise ValueError(\"Resource not found\")\n\n    # Start server\n    async with stdio_server() as streams:\n        await app.run(\n            streams[0],\n            
streams[1],\n            app.create_initialization_options()\n        )\n    ```\n  </Tab>\n</Tabs>\n\n## Best practices\n\nWhen implementing resource support:\n\n1. Use clear, descriptive resource names and URIs\n2. Include helpful descriptions to guide LLM understanding\n3. Set appropriate MIME types when known\n4. Implement resource templates for dynamic content\n5. Use subscriptions for frequently changing resources\n6. Handle errors gracefully with clear error messages\n7. Consider pagination for large resource lists\n8. Cache resource contents when appropriate\n9. Validate URIs before processing\n10. Document your custom URI schemes\n\n## Security considerations\n\nWhen exposing resources:\n\n* Validate all resource URIs\n* Implement appropriate access controls\n* Sanitize file paths to prevent directory traversal\n* Be cautious with binary data handling\n* Consider rate limiting for resource reads\n* Audit resource access\n* Encrypt sensitive data in transit\n* Validate MIME types\n* Implement timeouts for long-running reads\n* Handle resource cleanup appropriately\n\n\n# Roots\nSource: https://modelcontextprotocol.io/docs/concepts/roots\n\nUnderstanding roots in MCP\n\nRoots are a concept in MCP that define the boundaries where servers can operate. They provide a way for clients to inform servers about relevant resources and their locations.\n\n## What are Roots?\n\nA root is a URI that a client suggests a server should focus on. When a client connects to a server, it declares which roots the server should work with. While primarily used for filesystem paths, roots can be any valid URI including HTTP URLs.\n\nFor example, roots could be:\n\n```\nfile:///home/user/projects/myapp\nhttps://api.example.com/v1\n```\n\n## Why Use Roots?\n\nRoots serve several important purposes:\n\n1. **Guidance**: They inform servers about relevant resources and locations\n2. **Clarity**: Roots make it clear which resources are part of your workspace\n3. 
**Organization**: Multiple roots let you work with different resources simultaneously\n\n## How Roots Work\n\nWhen a client supports roots, it:\n\n1. Declares the `roots` capability during connection\n2. Provides a list of suggested roots to the server\n3. Notifies the server when roots change (if supported)\n\nWhile roots are informational and not strictly enforced, servers should:\n\n1. Respect the provided roots\n2. Use root URIs to locate and access resources\n3. Prioritize operations within root boundaries\n\n## Common Use Cases\n\nRoots are commonly used to define:\n\n* Project directories\n* Repository locations\n* API endpoints\n* Configuration locations\n* Resource boundaries\n\n## Best Practices\n\nWhen working with roots:\n\n1. Only suggest necessary resources\n2. Use clear, descriptive names for roots\n3. Monitor root accessibility\n4. Handle root changes gracefully\n\n## Example\n\nHere's how a typical MCP client might expose roots:\n\n```json\n{\n  \"roots\": [\n    {\n      \"uri\": \"file:///home/user/projects/frontend\",\n      \"name\": \"Frontend Repository\"\n    },\n    {\n      \"uri\": \"https://api.example.com/v1\",\n      \"name\": \"API Endpoint\"\n    }\n  ]\n}\n```\n\nThis configuration suggests the server focus on both a local repository and an API endpoint while keeping them logically separated.\n\n\n# Sampling\nSource: https://modelcontextprotocol.io/docs/concepts/sampling\n\nLet your servers request completions from LLMs\n\nSampling is a powerful MCP feature that allows servers to request LLM completions through the client, enabling sophisticated agentic behaviors while maintaining security and privacy.\n\n<Info>\n  This feature of MCP is not yet supported in the Claude Desktop client.\n</Info>\n\n## How sampling works\n\nThe sampling flow follows these steps:\n\n1. Server sends a `sampling/createMessage` request to the client\n2. Client reviews the request and can modify it\n3. Client samples from an LLM\n4. 
Client reviews the completion\n5. Client returns the result to the server\n\nThis human-in-the-loop design ensures users maintain control over what the LLM sees and generates.\n\n## Message format\n\nSampling requests use a standardized message format:\n\n```typescript\n{\n  messages: [\n    {\n      role: \"user\" | \"assistant\",\n      content: {\n        type: \"text\" | \"image\",\n\n        // For text:\n        text?: string,\n\n        // For images:\n        data?: string,             // base64 encoded\n        mimeType?: string\n      }\n    }\n  ],\n  modelPreferences?: {\n    hints?: [{\n      name?: string                // Suggested model name/family\n    }],\n    costPriority?: number,         // 0-1, importance of minimizing cost\n    speedPriority?: number,        // 0-1, importance of low latency\n    intelligencePriority?: number  // 0-1, importance of capabilities\n  },\n  systemPrompt?: string,\n  includeContext?: \"none\" | \"thisServer\" | \"allServers\",\n  temperature?: number,\n  maxTokens: number,\n  stopSequences?: string[],\n  metadata?: Record<string, unknown>\n}\n```\n\n## Request parameters\n\n### Messages\n\nThe `messages` array contains the conversation history to send to the LLM. Each message has:\n\n* `role`: Either \"user\" or \"assistant\"\n* `content`: The message content, which can be:\n  * Text content with a `text` field\n  * Image content with `data` (base64) and `mimeType` fields\n\n### Model preferences\n\nThe `modelPreferences` object allows servers to specify their model selection preferences:\n\n* `hints`: Array of model name suggestions that clients can use to select an appropriate model:\n\n  * `name`: String that can match full or partial model names (e.g. 
\"claude-3\", \"sonnet\")\n  * Clients may map hints to equivalent models from different providers\n  * Multiple hints are evaluated in preference order\n\n* Priority values (0-1 normalized):\n  * `costPriority`: Importance of minimizing costs\n  * `speedPriority`: Importance of low latency response\n  * `intelligencePriority`: Importance of advanced model capabilities\n\nClients make the final model selection based on these preferences and their available models.\n\n### System prompt\n\nAn optional `systemPrompt` field allows servers to request a specific system prompt. The client may modify or ignore this.\n\n### Context inclusion\n\nThe `includeContext` parameter specifies what MCP context to include:\n\n* `\"none\"`: No additional context\n* `\"thisServer\"`: Include context from the requesting server\n* `\"allServers\"`: Include context from all connected MCP servers\n\nThe client controls what context is actually included.\n\n### Sampling parameters\n\nFine-tune the LLM sampling with:\n\n* `temperature`: Controls randomness (0.0 to 1.0)\n* `maxTokens`: Maximum tokens to generate\n* `stopSequences`: Array of sequences that stop generation\n* `metadata`: Additional provider-specific parameters\n\n## Response format\n\nThe client returns a completion result:\n\n```typescript\n{\n  model: string,  // Name of the model used\n  stopReason?: \"endTurn\" | \"stopSequence\" | \"maxTokens\" | string,\n  role: \"user\" | \"assistant\",\n  content: {\n    type: \"text\" | \"image\",\n    text?: string,\n    data?: string,\n    mimeType?: string\n  }\n}\n```\n\n## Example request\n\nHere's an example of requesting sampling from a client:\n\n```json\n{\n  \"method\": \"sampling/createMessage\",\n  \"params\": {\n    \"messages\": [\n      {\n        \"role\": \"user\",\n        \"content\": {\n          \"type\": \"text\",\n          \"text\": \"What files are in the current directory?\"\n        }\n      }\n    ],\n    \"systemPrompt\": \"You are a helpful file system 
assistant.\",\n    \"includeContext\": \"thisServer\",\n    \"maxTokens\": 100\n  }\n}\n```\n\n## Best practices\n\nWhen implementing sampling:\n\n1. Always provide clear, well-structured prompts\n2. Handle both text and image content appropriately\n3. Set reasonable token limits\n4. Include relevant context through `includeContext`\n5. Validate responses before using them\n6. Handle errors gracefully\n7. Consider rate limiting sampling requests\n8. Document expected sampling behavior\n9. Test with various model parameters\n10. Monitor sampling costs\n\n## Human in the loop controls\n\nSampling is designed with human oversight in mind:\n\n### For prompts\n\n* Clients should show users the proposed prompt\n* Users should be able to modify or reject prompts\n* System prompts can be filtered or modified\n* Context inclusion is controlled by the client\n\n### For completions\n\n* Clients should show users the completion\n* Users should be able to modify or reject completions\n* Clients can filter or modify completions\n* Users control which model is used\n\n## Security considerations\n\nWhen implementing sampling:\n\n* Validate all message content\n* Sanitize sensitive information\n* Implement appropriate rate limits\n* Monitor sampling usage\n* Encrypt data in transit\n* Handle user data privacy\n* Audit sampling requests\n* Control cost exposure\n* Implement timeouts\n* Handle model errors gracefully\n\n## Common patterns\n\n### Agentic workflows\n\nSampling enables agentic patterns like:\n\n* Reading and analyzing resources\n* Making decisions based on context\n* Generating structured data\n* Handling multi-step tasks\n* Providing interactive assistance\n\n### Context management\n\nBest practices for context:\n\n* Request minimal necessary context\n* Structure context clearly\n* Handle context size limits\n* Update context as needed\n* Clean up stale context\n\n### Error handling\n\nRobust error handling should:\n\n* Catch sampling failures\n* Handle timeout 
errors\n* Manage rate limits\n* Validate responses\n* Provide fallback behaviors\n* Log errors appropriately\n\n## Limitations\n\nBe aware of these limitations:\n\n* Sampling depends on client capabilities\n* Users control sampling behavior\n* Context size has limits\n* Rate limits may apply\n* Costs should be considered\n* Model availability varies\n* Response times vary\n* Not all content types supported\n\n\n# Tools\nSource: https://modelcontextprotocol.io/docs/concepts/tools\n\nEnable LLMs to perform actions through your server\n\nTools are a powerful primitive in the Model Context Protocol (MCP) that enable servers to expose executable functionality to clients. Through tools, LLMs can interact with external systems, perform computations, and take actions in the real world.\n\n<Note>\n  Tools are designed to be **model-controlled**, meaning that tools are exposed from servers to clients with the intention of the AI model being able to automatically invoke them (with a human in the loop to grant approval).\n</Note>\n\n## Overview\n\nTools in MCP allow servers to expose executable functions that can be invoked by clients and used by LLMs to perform actions. Key aspects of tools include:\n\n* **Discovery**: Clients can obtain a list of available tools by sending a `tools/list` request\n* **Invocation**: Tools are called using the `tools/call` request, where servers perform the requested operation and return results\n* **Flexibility**: Tools can range from simple calculations to complex API interactions\n\nLike [resources](/docs/concepts/resources), tools are identified by unique names and can include descriptions to guide their usage. 
However, unlike resources, tools represent dynamic operations that can modify state or interact with external systems.\n\n## Tool definition structure\n\nEach tool is defined with the following structure:\n\n```typescript\n{\n  name: string;          // Unique identifier for the tool\n  description?: string;  // Human-readable description\n  inputSchema: {         // JSON Schema for the tool's parameters\n    type: \"object\",\n    properties: { ... }  // Tool-specific parameters\n  },\n  annotations?: {        // Optional hints about tool behavior\n    title?: string;      // Human-readable title for the tool\n    readOnlyHint?: boolean;    // If true, the tool does not modify its environment\n    destructiveHint?: boolean; // If true, the tool may perform destructive updates\n    idempotentHint?: boolean;  // If true, repeated calls with same args have no additional effect\n    openWorldHint?: boolean;   // If true, tool interacts with external entities\n  }\n}\n```\n\n## Implementing tools\n\nHere's an example of implementing a basic tool in an MCP server:\n\n<Tabs>\n  <Tab title=\"TypeScript\">\n    ```typescript\n    const server = new Server({\n      name: \"example-server\",\n      version: \"1.0.0\"\n    }, {\n      capabilities: {\n        tools: {}\n      }\n    });\n\n    // Define available tools\n    server.setRequestHandler(ListToolsRequestSchema, async () => {\n      return {\n        tools: [{\n          name: \"calculate_sum\",\n          description: \"Add two numbers together\",\n          inputSchema: {\n            type: \"object\",\n            properties: {\n              a: { type: \"number\" },\n              b: { type: \"number\" }\n            },\n            required: [\"a\", \"b\"]\n          }\n        }]\n      };\n    });\n\n    // Handle tool execution\n    server.setRequestHandler(CallToolRequestSchema, async (request) => {\n      if (request.params.name === \"calculate_sum\") {\n        const { a, b } = request.params.arguments;\n 
       return {\n          content: [\n            {\n              type: \"text\",\n              text: String(a + b)\n            }\n          ]\n        };\n      }\n      throw new Error(\"Tool not found\");\n    });\n    ```\n  </Tab>\n\n  <Tab title=\"Python\">\n    ```python\n    app = Server(\"example-server\")\n\n    @app.list_tools()\n    async def list_tools() -> list[types.Tool]:\n        return [\n            types.Tool(\n                name=\"calculate_sum\",\n                description=\"Add two numbers together\",\n                inputSchema={\n                    \"type\": \"object\",\n                    \"properties\": {\n                        \"a\": {\"type\": \"number\"},\n                        \"b\": {\"type\": \"number\"}\n                    },\n                    \"required\": [\"a\", \"b\"]\n                }\n            )\n        ]\n\n    @app.call_tool()\n    async def call_tool(\n        name: str,\n        arguments: dict\n    ) -> list[types.TextContent | types.ImageContent | types.EmbeddedResource]:\n        if name == \"calculate_sum\":\n            a = arguments[\"a\"]\n            b = arguments[\"b\"]\n            result = a + b\n            return [types.TextContent(type=\"text\", text=str(result))]\n        raise ValueError(f\"Tool not found: {name}\")\n    ```\n  </Tab>\n</Tabs>\n\n## Example tool patterns\n\nHere are some examples of types of tools that a server could provide:\n\n### System operations\n\nTools that interact with the local system:\n\n```typescript\n{\n  name: \"execute_command\",\n  description: \"Run a shell command\",\n  inputSchema: {\n    type: \"object\",\n    properties: {\n      command: { type: \"string\" },\n      args: { type: \"array\", items: { type: \"string\" } }\n    }\n  }\n}\n```\n\n### API integrations\n\nTools that wrap external APIs:\n\n```typescript\n{\n  name: \"github_create_issue\",\n  description: \"Create a GitHub issue\",\n  inputSchema: {\n    type: \"object\",\n    
properties: {\n      title: { type: \"string\" },\n      body: { type: \"string\" },\n      labels: { type: \"array\", items: { type: \"string\" } }\n    }\n  }\n}\n```\n\n### Data processing\n\nTools that transform or analyze data:\n\n```typescript\n{\n  name: \"analyze_csv\",\n  description: \"Analyze a CSV file\",\n  inputSchema: {\n    type: \"object\",\n    properties: {\n      filepath: { type: \"string\" },\n      operations: {\n        type: \"array\",\n        items: {\n          enum: [\"sum\", \"average\", \"count\"]\n        }\n      }\n    }\n  }\n}\n```\n\n## Best practices\n\nWhen implementing tools:\n\n1. Provide clear, descriptive names and descriptions\n2. Use detailed JSON Schema definitions for parameters\n3. Include examples in tool descriptions to demonstrate how the model should use them\n4. Implement proper error handling and validation\n5. Use progress reporting for long operations\n6. Keep tool operations focused and atomic\n7. Document expected return value structures\n8. Implement proper timeouts\n9. Consider rate limiting for resource-intensive operations\n10. Log tool usage for debugging and monitoring\n\n## Security considerations\n\nWhen exposing tools:\n\n### Input validation\n\n* Validate all parameters against the schema\n* Sanitize file paths and system commands\n* Validate URLs and external identifiers\n* Check parameter sizes and ranges\n* Prevent command injection\n\n### Access control\n\n* Implement authentication where needed\n* Use appropriate authorization checks\n* Audit tool usage\n* Rate limit requests\n* Monitor for abuse\n\n### Error handling\n\n* Don't expose internal errors to clients\n* Log security-relevant errors\n* Handle timeouts appropriately\n* Clean up resources after errors\n* Validate return values\n\n## Tool discovery and updates\n\nMCP supports dynamic tool discovery:\n\n1. Clients can list available tools at any time\n2. 
Servers can notify clients when tools change using `notifications/tools/list_changed`\n3. Tools can be added or removed during runtime\n4. Tool definitions can be updated (though this should be done carefully)\n\n## Error handling\n\nTool errors should be reported within the result object, not as MCP protocol-level errors. This allows the LLM to see and potentially handle the error. When a tool encounters an error:\n\n1. Set `isError` to `true` in the result\n2. Include error details in the `content` array\n\nHere's an example of proper error handling for tools:\n\n<Tabs>\n  <Tab title=\"TypeScript\">\n    ```typescript\n    try {\n      // Tool operation\n      const result = performOperation();\n      return {\n        content: [\n          {\n            type: \"text\",\n            text: `Operation successful: ${result}`\n          }\n        ]\n      };\n    } catch (error) {\n      return {\n        isError: true,\n        content: [\n          {\n            type: \"text\",\n            text: `Error: ${error.message}`\n          }\n        ]\n      };\n    }\n    ```\n  </Tab>\n\n  <Tab title=\"Python\">\n    ```python\n    try:\n        # Tool operation\n        result = perform_operation()\n        return types.CallToolResult(\n            content=[\n                types.TextContent(\n                    type=\"text\",\n                    text=f\"Operation successful: {result}\"\n                )\n            ]\n        )\n    except Exception as error:\n        return types.CallToolResult(\n            isError=True,\n            content=[\n                types.TextContent(\n                    type=\"text\",\n                    text=f\"Error: {str(error)}\"\n                )\n            ]\n        )\n    ```\n  </Tab>\n</Tabs>\n\nThis approach allows the LLM to see that an error occurred and potentially take corrective action or request human intervention.\n\n## Tool annotations\n\nTool annotations provide additional metadata about a tool's 
behavior, helping clients understand how to present and manage tools. These annotations are hints that describe the nature and impact of a tool, but should not be relied upon for security decisions.\n\n### Purpose of tool annotations\n\nTool annotations serve several key purposes:\n\n1. Provide UX-specific information without affecting model context\n2. Help clients categorize and present tools appropriately\n3. Convey information about a tool's potential side effects\n4. Assist in developing intuitive interfaces for tool approval\n\n### Available tool annotations\n\nThe MCP specification defines the following annotations for tools:\n\n| Annotation        | Type    | Default | Description                                                                                                                          |\n| ----------------- | ------- | ------- | ------------------------------------------------------------------------------------------------------------------------------------ |\n| `title`           | string  | -       | A human-readable title for the tool, useful for UI display                                                                           |\n| `readOnlyHint`    | boolean | false   | If true, indicates the tool does not modify its environment                                                                          |\n| `destructiveHint` | boolean | true    | If true, the tool may perform destructive updates (only meaningful when `readOnlyHint` is false)                                     |\n| `idempotentHint`  | boolean | false   | If true, calling the tool repeatedly with the same arguments has no additional effect (only meaningful when `readOnlyHint` is false) |\n| `openWorldHint`   | boolean | true    | If true, the tool may interact with an \"open world\" of external entities                                                             |\n\n### Example usage\n\nHere's how to define tools with annotations for different 
scenarios:\n\n```typescript\n// A read-only search tool\n{\n  name: \"web_search\",\n  description: \"Search the web for information\",\n  inputSchema: {\n    type: \"object\",\n    properties: {\n      query: { type: \"string\" }\n    },\n    required: [\"query\"]\n  },\n  annotations: {\n    title: \"Web Search\",\n    readOnlyHint: true,\n    openWorldHint: true\n  }\n}\n\n// A destructive file deletion tool\n{\n  name: \"delete_file\",\n  description: \"Delete a file from the filesystem\",\n  inputSchema: {\n    type: \"object\",\n    properties: {\n      path: { type: \"string\" }\n    },\n    required: [\"path\"]\n  },\n  annotations: {\n    title: \"Delete File\",\n    readOnlyHint: false,\n    destructiveHint: true,\n    idempotentHint: true,\n    openWorldHint: false\n  }\n}\n\n// A non-destructive database record creation tool\n{\n  name: \"create_record\",\n  description: \"Create a new record in the database\",\n  inputSchema: {\n    type: \"object\",\n    properties: {\n      table: { type: \"string\" },\n      data: { type: \"object\" }\n    },\n    required: [\"table\", \"data\"]\n  },\n  annotations: {\n    title: \"Create Database Record\",\n    readOnlyHint: false,\n    destructiveHint: false,\n    idempotentHint: false,\n    openWorldHint: false\n  }\n}\n```\n\n### Integrating annotations in server implementation\n\n<Tabs>\n  <Tab title=\"TypeScript\">\n    ```typescript\n    server.setRequestHandler(ListToolsRequestSchema, async () => {\n      return {\n        tools: [{\n          name: \"calculate_sum\",\n          description: \"Add two numbers together\",\n          inputSchema: {\n            type: \"object\",\n            properties: {\n              a: { type: \"number\" },\n              b: { type: \"number\" }\n            },\n            required: [\"a\", \"b\"]\n          },\n          annotations: {\n            title: \"Calculate Sum\",\n            readOnlyHint: true,\n            openWorldHint: false\n          }\n        }]\n     
 };\n    });\n    ```\n  </Tab>\n\n  <Tab title=\"Python\">\n    ```python\n    from mcp.server.fastmcp import FastMCP\n\n    mcp = FastMCP(\"example-server\")\n\n    @mcp.tool(\n        annotations={\n            \"title\": \"Calculate Sum\",\n            \"readOnlyHint\": True,\n            \"openWorldHint\": False\n        }\n    )\n    async def calculate_sum(a: float, b: float) -> str:\n        \"\"\"Add two numbers together.\n\n        Args:\n            a: First number to add\n            b: Second number to add\n        \"\"\"\n        result = a + b\n        return str(result)\n    ```\n  </Tab>\n</Tabs>\n\n### Best practices for tool annotations\n\n1. **Be accurate about side effects**: Clearly indicate whether a tool modifies its environment and whether those modifications are destructive.\n\n2. **Use descriptive titles**: Provide human-friendly titles that clearly describe the tool's purpose.\n\n3. **Indicate idempotency properly**: Mark tools as idempotent only if repeated calls with the same arguments truly have no additional effect.\n\n4. **Set appropriate open/closed world hints**: Indicate whether a tool interacts with a closed system (like a database) or an open system (like the web).\n\n5. **Remember annotations are hints**: All properties in ToolAnnotations are hints and not guaranteed to provide a faithful description of tool behavior. 
Clients should never make security-critical decisions based solely on annotations.\n\n## Testing tools\n\nA comprehensive testing strategy for MCP tools should cover:\n\n* **Functional testing**: Verify tools execute correctly with valid inputs and handle invalid inputs appropriately\n* **Integration testing**: Test tool interaction with external systems using both real and mocked dependencies\n* **Security testing**: Validate authentication, authorization, input sanitization, and rate limiting\n* **Performance testing**: Check behavior under load, timeout handling, and resource cleanup\n* **Error handling**: Ensure tools properly report errors through the MCP protocol and clean up resources\n\n\n# Transports\nSource: https://modelcontextprotocol.io/docs/concepts/transports\n\nLearn about MCP's communication mechanisms\n\nTransports in the Model Context Protocol (MCP) provide the foundation for communication between clients and servers. A transport handles the underlying mechanics of how messages are sent and received.\n\n## Message Format\n\nMCP uses [JSON-RPC](https://www.jsonrpc.org/) 2.0 as its wire format. 
The transport layer is responsible for converting MCP protocol messages into JSON-RPC format for transmission and converting received JSON-RPC messages back into MCP protocol messages.\n\nThere are three types of JSON-RPC messages used:\n\n### Requests\n\n```typescript\n{\n  jsonrpc: \"2.0\",\n  id: number | string,\n  method: string,\n  params?: object\n}\n```\n\n### Responses\n\n```typescript\n{\n  jsonrpc: \"2.0\",\n  id: number | string,\n  result?: object,\n  error?: {\n    code: number,\n    message: string,\n    data?: unknown\n  }\n}\n```\n\n### Notifications\n\n```typescript\n{\n  jsonrpc: \"2.0\",\n  method: string,\n  params?: object\n}\n```\n\n## Built-in Transport Types\n\nMCP currently defines two standard transport mechanisms:\n\n### Standard Input/Output (stdio)\n\nThe stdio transport enables communication through standard input and output streams. This is particularly useful for local integrations and command-line tools.\n\nUse stdio when:\n\n* Building command-line tools\n* Implementing local integrations\n* Needing simple process communication\n* Working with shell scripts\n\n<Tabs>\n  <Tab title=\"TypeScript (Server)\">\n    ```typescript\n    const server = new Server({\n      name: \"example-server\",\n      version: \"1.0.0\"\n    }, {\n      capabilities: {}\n    });\n\n    const transport = new StdioServerTransport();\n    await server.connect(transport);\n    ```\n  </Tab>\n\n  <Tab title=\"TypeScript (Client)\">\n    ```typescript\n    const client = new Client({\n      name: \"example-client\",\n      version: \"1.0.0\"\n    }, {\n      capabilities: {}\n    });\n\n    const transport = new StdioClientTransport({\n      command: \"./server\",\n      args: [\"--option\", \"value\"]\n    });\n    await client.connect(transport);\n    ```\n  </Tab>\n\n  <Tab title=\"Python (Server)\">\n    ```python\n    app = Server(\"example-server\")\n\n    async with stdio_server() as streams:\n        await app.run(\n            streams[0],\n           
 streams[1],\n            app.create_initialization_options()\n        )\n    ```\n  </Tab>\n\n  <Tab title=\"Python (Client)\">\n    ```python\n    params = StdioServerParameters(\n        command=\"./server\",\n        args=[\"--option\", \"value\"]\n    )\n\n    async with stdio_client(params) as streams:\n        async with ClientSession(streams[0], streams[1]) as session:\n            await session.initialize()\n    ```\n  </Tab>\n</Tabs>\n\n### Streamable HTTP\n\nThe Streamable HTTP transport uses HTTP POST requests for client-to-server communication and optional Server-Sent Events (SSE) streams for server-to-client communication.\n\nUse Streamable HTTP when:\n\n* Building web-based integrations\n* Needing client-server communication over HTTP\n* Requiring stateful sessions\n* Supporting multiple concurrent clients\n* Implementing resumable connections\n\n#### How it Works\n\n1. **Client-to-Server Communication**: Every JSON-RPC message from client to server is sent as a new HTTP POST request to the MCP endpoint\n2. **Server Responses**: The server can respond either with:\n   * A single JSON response (`Content-Type: application/json`)\n   * An SSE stream (`Content-Type: text/event-stream`) for multiple messages\n3. 
**Server-to-Client Communication**: Servers can send requests/notifications to clients via:\n   * SSE streams initiated by client requests\n   * SSE streams from HTTP GET requests to the MCP endpoint\n\n<Tabs>\n  <Tab title=\"TypeScript (Server)\">\n    ```typescript\n    import express from \"express\";\n\n    const app = express();\n\n    const server = new Server({\n      name: \"example-server\",\n      version: \"1.0.0\"\n    }, {\n      capabilities: {}\n    });\n\n    // MCP endpoint handles both POST and GET\n    app.post(\"/mcp\", async (req, res) => {\n      // Handle JSON-RPC request\n      const response = await server.handleRequest(req.body);\n\n      // Return single response or SSE stream\n      if (needsStreaming) {\n        res.setHeader(\"Content-Type\", \"text/event-stream\");\n        // Send SSE events...\n      } else {\n        res.json(response);\n      }\n    });\n\n    app.get(\"/mcp\", (req, res) => {\n      // Optional: Support server-initiated SSE streams\n      res.setHeader(\"Content-Type\", \"text/event-stream\");\n      // Send server notifications/requests...\n    });\n\n    app.listen(3000);\n    ```\n  </Tab>\n\n  <Tab title=\"TypeScript (Client)\">\n    ```typescript\n    const client = new Client({\n      name: \"example-client\",\n      version: \"1.0.0\"\n    }, {\n      capabilities: {}\n    });\n\n    const transport = new HttpClientTransport(\n      new URL(\"http://localhost:3000/mcp\")\n    );\n    await client.connect(transport);\n    ```\n  </Tab>\n\n  <Tab title=\"Python (Server)\">\n    ```python\n    from mcp.server.http import HttpServerTransport\n    from starlette.applications import Starlette\n    from starlette.routing import Route\n\n    app = Server(\"example-server\")\n\n    async def handle_mcp(scope, receive, send):\n        if scope[\"method\"] == \"POST\":\n            # Handle JSON-RPC request\n            response = await app.handle_request(request_body)\n\n            if needs_streaming:\n             
   # Return SSE stream\n                await send_sse_response(send, response)\n            else:\n                # Return JSON response\n                await send_json_response(send, response)\n\n        elif scope[\"method\"] == \"GET\":\n            # Optional: Support server-initiated SSE streams\n            await send_sse_stream(send)\n\n    starlette_app = Starlette(\n        routes=[\n            Route(\"/mcp\", endpoint=handle_mcp, methods=[\"POST\", \"GET\"]),\n        ]\n    )\n    ```\n  </Tab>\n\n  <Tab title=\"Python (Client)\">\n    ```python\n    async with http_client(\"http://localhost:8000/mcp\") as transport:\n        async with ClientSession(transport[0], transport[1]) as session:\n            await session.initialize()\n    ```\n  </Tab>\n</Tabs>\n\n#### Session Management\n\nStreamable HTTP supports stateful sessions to maintain context across multiple requests:\n\n1. **Session Initialization**: Servers may assign a session ID during initialization by including it in an `Mcp-Session-Id` header\n2. **Session Persistence**: Clients must include the session ID in all subsequent requests using the `Mcp-Session-Id` header\n3. **Session Termination**: Sessions can be explicitly terminated by sending an HTTP DELETE request with the session ID\n\nExample session flow:\n\n```typescript\n// Server assigns session ID during initialization\napp.post(\"/mcp\", (req, res) => {\n  if (req.body.method === \"initialize\") {\n    const sessionId = generateSecureId();\n    res.setHeader(\"Mcp-Session-Id\", sessionId);\n    // Store session state...\n  }\n  // Handle request...\n});\n\n// Client includes session ID in subsequent requests\nfetch(\"/mcp\", {\n  method: \"POST\",\n  headers: {\n    \"Content-Type\": \"application/json\",\n    \"Mcp-Session-Id\": sessionId,\n  },\n  body: JSON.stringify(request),\n});\n```\n\n#### Resumability and Redelivery\n\nTo support resuming broken connections, Streamable HTTP provides:\n\n1. 
**Event IDs**: Servers can attach unique IDs to SSE events for tracking\n2. **Resume from Last Event**: Clients can resume by sending the `Last-Event-ID` header\n3. **Message Replay**: Servers can replay missed messages from the disconnection point\n\nThis ensures reliable message delivery even with unstable network connections.\n\n#### Security Considerations\n\nWhen implementing Streamable HTTP transport, follow these security best practices:\n\n1. **Validate Origin Headers**: Always validate the `Origin` header on all incoming connections to prevent DNS rebinding attacks\n2. **Bind to Localhost**: When running locally, bind only to localhost (127.0.0.1) rather than all network interfaces (0.0.0.0)\n3. **Implement Authentication**: Use proper authentication for all connections\n4. **Use HTTPS**: Always use TLS/HTTPS for production deployments\n5. **Validate Session IDs**: Ensure session IDs are cryptographically secure and properly validated\n\nWithout these protections, attackers could use DNS rebinding to interact with local MCP servers from remote websites.\n\n### Server-Sent Events (SSE) - Deprecated\n\n<Note>\n  SSE as a standalone transport is deprecated as of protocol version 2024-11-05.\n  It has been replaced by Streamable HTTP, which incorporates SSE as an optional\n  streaming mechanism. For backwards compatibility information, see the\n  [backwards compatibility](#backwards-compatibility) section below.\n</Note>\n\nThe legacy SSE transport enabled server-to-client streaming with HTTP POST requests for client-to-server communication.\n\nPreviously used when:\n\n* Only server-to-client streaming is needed\n* Working with restricted networks\n* Implementing simple updates\n\n#### Legacy Security Considerations\n\nThe deprecated SSE transport had similar security considerations to Streamable HTTP, particularly regarding DNS rebinding attacks. 
These same protections should be applied when using SSE streams within the Streamable HTTP transport.\n\n<Tabs>\n  <Tab title=\"TypeScript (Server)\">\n    ```typescript\n    import express from \"express\";\n\n    const app = express();\n\n    const server = new Server({\n      name: \"example-server\",\n      version: \"1.0.0\"\n    }, {\n      capabilities: {}\n    });\n\n    let transport: SSEServerTransport | null = null;\n\n    app.get(\"/sse\", (req, res) => {\n      transport = new SSEServerTransport(\"/messages\", res);\n      server.connect(transport);\n    });\n\n    app.post(\"/messages\", (req, res) => {\n      if (transport) {\n        transport.handlePostMessage(req, res);\n      }\n    });\n\n    app.listen(3000);\n    ```\n  </Tab>\n\n  <Tab title=\"TypeScript (Client)\">\n    ```typescript\n    const client = new Client({\n      name: \"example-client\",\n      version: \"1.0.0\"\n    }, {\n      capabilities: {}\n    });\n\n    const transport = new SSEClientTransport(\n      new URL(\"http://localhost:3000/sse\")\n    );\n    await client.connect(transport);\n    ```\n  </Tab>\n\n  <Tab title=\"Python (Server)\">\n    ```python\n    from mcp.server.sse import SseServerTransport\n    from starlette.applications import Starlette\n    from starlette.routing import Route\n\n    app = Server(\"example-server\")\n    sse = SseServerTransport(\"/messages\")\n\n    async def handle_sse(scope, receive, send):\n        async with sse.connect_sse(scope, receive, send) as streams:\n            await app.run(streams[0], streams[1], app.create_initialization_options())\n\n    async def handle_messages(scope, receive, send):\n        await sse.handle_post_message(scope, receive, send)\n\n    starlette_app = Starlette(\n        routes=[\n            Route(\"/sse\", endpoint=handle_sse),\n            Route(\"/messages\", endpoint=handle_messages, methods=[\"POST\"]),\n        ]\n    )\n    ```\n  </Tab>\n\n  <Tab title=\"Python (Client)\">\n    ```python\n    
async with sse_client(\"http://localhost:8000/sse\") as streams:\n        async with ClientSession(streams[0], streams[1]) as session:\n            await session.initialize()\n    ```\n  </Tab>\n</Tabs>\n\n## Custom Transports\n\nMCP makes it easy to implement custom transports for specific needs. Any transport implementation just needs to conform to the Transport interface:\n\nYou can implement custom transports for:\n\n* Custom network protocols\n* Specialized communication channels\n* Integration with existing systems\n* Performance optimization\n\n<Tabs>\n  <Tab title=\"TypeScript\">\n    ```typescript\n    interface Transport {\n      // Start processing messages\n      start(): Promise<void>;\n\n      // Send a JSON-RPC message\n      send(message: JSONRPCMessage): Promise<void>;\n\n      // Close the connection\n      close(): Promise<void>;\n\n      // Callbacks\n      onclose?: () => void;\n      onerror?: (error: Error) => void;\n      onmessage?: (message: JSONRPCMessage) => void;\n    }\n    ```\n  </Tab>\n\n  <Tab title=\"Python\">\n    Note that while MCP Servers are often implemented with asyncio, we recommend\n    implementing low-level interfaces like transports with `anyio` for wider compatibility.\n\n    ```python\n    @contextmanager\n    async def create_transport(\n        read_stream: MemoryObjectReceiveStream[JSONRPCMessage | Exception],\n        write_stream: MemoryObjectSendStream[JSONRPCMessage]\n    ):\n        \"\"\"\n        Transport interface for MCP.\n\n        Args:\n            read_stream: Stream to read incoming messages from\n            write_stream: Stream to write outgoing messages to\n        \"\"\"\n        async with anyio.create_task_group() as tg:\n            try:\n                # Start processing messages\n                tg.start_soon(lambda: process_messages(read_stream))\n\n                # Send messages\n                async with write_stream:\n                    yield write_stream\n\n            except 
Exception as exc:\n                # Handle errors\n                raise exc\n            finally:\n                # Clean up\n                tg.cancel_scope.cancel()\n                await write_stream.aclose()\n                await read_stream.aclose()\n    ```\n  </Tab>\n</Tabs>\n\n## Error Handling\n\nTransport implementations should handle various error scenarios:\n\n1. Connection errors\n2. Message parsing errors\n3. Protocol errors\n4. Network timeouts\n5. Resource cleanup\n\nExample error handling:\n\n<Tabs>\n  <Tab title=\"TypeScript\">\n    ```typescript\n    class ExampleTransport implements Transport {\n      async start() {\n        try {\n          // Connection logic\n        } catch (error) {\n          this.onerror?.(new Error(`Failed to connect: ${error}`));\n          throw error;\n        }\n      }\n\n      async send(message: JSONRPCMessage) {\n        try {\n          // Sending logic\n        } catch (error) {\n          this.onerror?.(new Error(`Failed to send message: ${error}`));\n          throw error;\n        }\n      }\n    }\n    ```\n  </Tab>\n\n  <Tab title=\"Python\">\n    Note that while MCP Servers are often implemented with asyncio, we recommend\n    implementing low-level interfaces like transports with `anyio` for wider compatibility.\n\n    ```python\n    @contextmanager\n    async def example_transport(scope: Scope, receive: Receive, send: Send):\n        try:\n            # Create streams for bidirectional communication\n            read_stream_writer, read_stream = anyio.create_memory_object_stream(0)\n            write_stream, write_stream_reader = anyio.create_memory_object_stream(0)\n\n            async def message_handler():\n                try:\n                    async with read_stream_writer:\n                        # Message handling logic\n                        pass\n                except Exception as exc:\n                    logger.error(f\"Failed to handle message: {exc}\")\n                    raise 
exc\n\n            async with anyio.create_task_group() as tg:\n                tg.start_soon(message_handler)\n                try:\n                    # Yield streams for communication\n                    yield read_stream, write_stream\n                except Exception as exc:\n                    logger.error(f\"Transport error: {exc}\")\n                    raise exc\n                finally:\n                    tg.cancel_scope.cancel()\n                    await write_stream.aclose()\n                    await read_stream.aclose()\n        except Exception as exc:\n            logger.error(f\"Failed to initialize transport: {exc}\")\n            raise exc\n    ```\n  </Tab>\n</Tabs>\n\n## Best Practices\n\nWhen implementing or using MCP transport:\n\n1. Handle connection lifecycle properly\n2. Implement proper error handling\n3. Clean up resources on connection close\n4. Use appropriate timeouts\n5. Validate messages before sending\n6. Log transport events for debugging\n7. Implement reconnection logic when appropriate\n8. Handle backpressure in message queues\n9. Monitor connection health\n10. 
Implement proper security measures\n\n## Security Considerations\n\nWhen implementing transport:\n\n### Authentication and Authorization\n\n* Implement proper authentication mechanisms\n* Validate client credentials\n* Use secure token handling\n* Implement authorization checks\n\n### Data Security\n\n* Use TLS for network transport\n* Encrypt sensitive data\n* Validate message integrity\n* Implement message size limits\n* Sanitize input data\n\n### Network Security\n\n* Implement rate limiting\n* Use appropriate timeouts\n* Handle denial of service scenarios\n* Monitor for unusual patterns\n* Implement proper firewall rules\n* For HTTP-based transports (including Streamable HTTP), validate Origin headers to prevent DNS rebinding attacks\n* For local servers, bind only to localhost (127.0.0.1) instead of all interfaces (0.0.0.0)\n\n## Debugging Transport\n\nTips for debugging transport issues:\n\n1. Enable debug logging\n2. Monitor message flow\n3. Check connection states\n4. Validate message formats\n5. Test error scenarios\n6. Use network analysis tools\n7. Implement health checks\n8. Monitor resource usage\n9. Test edge cases\n10. Use proper error tracking\n\n## Backwards Compatibility\n\nTo maintain compatibility between different protocol versions:\n\n### For Servers Supporting Older Clients\n\nServers wanting to support clients using the deprecated HTTP+SSE transport should:\n\n1. Host both the old SSE and POST endpoints alongside the new MCP endpoint\n2. Handle initialization requests on both endpoints\n3. Maintain separate handling logic for each transport type\n\n### For Clients Supporting Older Servers\n\nClients wanting to support servers using the deprecated transport should:\n\n1. Accept server URLs that may use either transport\n2. Attempt to POST an `InitializeRequest` with proper `Accept` headers:\n   * If successful, use Streamable HTTP transport\n   * If it fails with 4xx status, fall back to legacy SSE transport\n3. 
Issue a GET request expecting an SSE stream with `endpoint` event for legacy servers\n\nExample compatibility detection:\n\n```typescript\nasync function detectTransport(serverUrl: string): Promise<TransportType> {\n  try {\n    // Try Streamable HTTP first\n    const response = await fetch(serverUrl, {\n      method: \"POST\",\n      headers: {\n        \"Content-Type\": \"application/json\",\n        Accept: \"application/json, text/event-stream\",\n      },\n      body: JSON.stringify({\n        jsonrpc: \"2.0\",\n        method: \"initialize\",\n        params: {\n          /* ... */\n        },\n      }),\n    });\n\n    if (response.ok) {\n      return \"streamable-http\";\n    }\n  } catch (error) {\n    // Fall back to legacy SSE\n    const sseResponse = await fetch(serverUrl, {\n      method: \"GET\",\n      headers: { Accept: \"text/event-stream\" },\n    });\n\n    if (sseResponse.ok) {\n      return \"legacy-sse\";\n    }\n  }\n\n  throw new Error(\"Unsupported transport\");\n}\n```\n\n\n# Debugging\nSource: https://modelcontextprotocol.io/docs/tools/debugging\n\nA comprehensive guide to debugging Model Context Protocol (MCP) integrations\n\nEffective debugging is essential when developing MCP servers or integrating them with applications. This guide covers the debugging tools and approaches available in the MCP ecosystem.\n\n<Info>\n  This guide is for macOS. Guides for other platforms are coming soon.\n</Info>\n\n## Debugging tools overview\n\nMCP provides several tools for debugging at different levels:\n\n1. **MCP Inspector**\n\n   * Interactive debugging interface\n   * Direct server testing\n   * See the [Inspector guide](/docs/tools/inspector) for details\n\n2. **Claude Desktop Developer Tools**\n\n   * Integration testing\n   * Log collection\n   * Chrome DevTools integration\n\n3. 
**Server Logging**\n   * Custom logging implementations\n   * Error tracking\n   * Performance monitoring\n\n## Debugging in Claude Desktop\n\n### Checking server status\n\nThe Claude.app interface provides basic server status information:\n\n1. Click the <img src=\"https://mintlify.s3.us-west-1.amazonaws.com/mcp/images/claude-desktop-mcp-plug-icon.svg\" style={{display: 'inline', margin: 0, height: '1.3em'}} /> icon to view:\n\n   * Connected servers\n   * Available prompts and resources\n\n2. Click the \"Search and tools\" <img src=\"https://mintlify.s3.us-west-1.amazonaws.com/mcp/images/claude-desktop-mcp-slider.svg\" style={{display: 'inline', margin: 0, height: '1.3em'}} /> icon to view:\n   * Tools made available to the model\n\n### Viewing logs\n\nReview detailed MCP logs from Claude Desktop:\n\n```bash\n# Follow logs in real-time\ntail -n 20 -F ~/Library/Logs/Claude/mcp*.log\n```\n\nThe logs capture:\n\n* Server connection events\n* Configuration issues\n* Runtime errors\n* Message exchanges\n\n### Using Chrome DevTools\n\nAccess Chrome's developer tools inside Claude Desktop to investigate client-side errors:\n\n1. Create a `developer_settings.json` file with `allowDevTools` set to true:\n\n```bash\necho '{\"allowDevTools\": true}' > ~/Library/Application\\ Support/Claude/developer_settings.json\n```\n\n2. 
Open DevTools: `Command-Option-Shift-i`\n\nNote: You'll see two DevTools windows:\n\n* Main content window\n* App title bar window\n\nUse the Console panel to inspect client-side errors.\n\nUse the Network panel to inspect:\n\n* Message payloads\n* Connection timing\n\n## Common issues\n\n### Working directory\n\nWhen using MCP servers with Claude Desktop:\n\n* The working directory for servers launched via `claude_desktop_config.json` may be undefined (like `/` on macOS) since Claude Desktop could be started from anywhere\n* Always use absolute paths in your configuration and `.env` files to ensure reliable operation\n* For testing servers directly via command line, the working directory will be where you run the command\n\nFor example in `claude_desktop_config.json`, use:\n\n```json\n{\n  \"command\": \"npx\",\n  \"args\": [\n    \"-y\",\n    \"@modelcontextprotocol/server-filesystem\",\n    \"/Users/username/data\"\n  ]\n}\n```\n\nInstead of relative paths like `./data`\n\n### Environment variables\n\nMCP servers inherit only a subset of environment variables automatically, like `USER`, `HOME`, and `PATH`.\n\nTo override the default variables or provide your own, you can specify an `env` key in `claude_desktop_config.json`:\n\n```json\n{\n  \"myserver\": {\n    \"command\": \"mcp-server-myapp\",\n    \"env\": {\n      \"MYAPP_API_KEY\": \"some_key\"\n    }\n  }\n}\n```\n\n### Server initialization\n\nCommon initialization problems:\n\n1. **Path Issues**\n\n   * Incorrect server executable path\n   * Missing required files\n   * Permission problems\n   * Try using an absolute path for `command`\n\n2. **Configuration Errors**\n\n   * Invalid JSON syntax\n   * Missing required fields\n   * Type mismatches\n\n3. **Environment Problems**\n   * Missing environment variables\n   * Incorrect variable values\n   * Permission restrictions\n\n### Connection problems\n\nWhen servers fail to connect:\n\n1. Check Claude Desktop logs\n2. Verify server process is running\n3. 
Test standalone with [Inspector](/docs/tools/inspector)\n4. Verify protocol compatibility\n\n## Implementing logging\n\n### Server-side logging\n\nWhen building a server that uses the local stdio [transport](/docs/concepts/transports), all messages logged to stderr (standard error) will be captured by the host application (e.g., Claude Desktop) automatically.\n\n<Warning>\n  Local MCP servers should not log messages to stdout (standard out), as this will interfere with protocol operation.\n</Warning>\n\nFor all [transports](/docs/concepts/transports), you can also provide logging to the client by sending a log message notification:\n\n<Tabs>\n  <Tab title=\"Python\">\n    ```python\n    server.request_context.session.send_log_message(\n      level=\"info\",\n      data=\"Server started successfully\",\n    )\n    ```\n  </Tab>\n\n  <Tab title=\"TypeScript\">\n    ```typescript\n    server.sendLoggingMessage({\n      level: \"info\",\n      data: \"Server started successfully\",\n    });\n    ```\n  </Tab>\n</Tabs>\n\nImportant events to log:\n\n* Initialization steps\n* Resource access\n* Tool execution\n* Error conditions\n* Performance metrics\n\n### Client-side logging\n\nIn client applications:\n\n1. Enable debug logging\n2. Monitor network traffic\n3. Track message exchanges\n4. Record error states\n\n## Debugging workflow\n\n### Development cycle\n\n1. Initial Development\n\n   * Use [Inspector](/docs/tools/inspector) for basic testing\n   * Implement core functionality\n   * Add logging points\n\n2. Integration Testing\n   * Test in Claude Desktop\n   * Monitor logs\n   * Check error handling\n\n### Testing changes\n\nTo test changes efficiently:\n\n* **Configuration changes**: Restart Claude Desktop\n* **Server code changes**: Use Command-R to reload\n* **Quick iteration**: Use [Inspector](/docs/tools/inspector) during development\n\n## Best practices\n\n### Logging strategy\n\n1. 
**Structured Logging**\n\n   * Use consistent formats\n   * Include context\n   * Add timestamps\n   * Track request IDs\n\n2. **Error Handling**\n\n   * Log stack traces\n   * Include error context\n   * Track error patterns\n   * Monitor recovery\n\n3. **Performance Tracking**\n   * Log operation timing\n   * Monitor resource usage\n   * Track message sizes\n   * Measure latency\n\n### Security considerations\n\nWhen debugging:\n\n1. **Sensitive Data**\n\n   * Sanitize logs\n   * Protect credentials\n   * Mask personal information\n\n2. **Access Control**\n   * Verify permissions\n   * Check authentication\n   * Monitor access patterns\n\n## Getting help\n\nWhen encountering issues:\n\n1. **First Steps**\n\n   * Check server logs\n   * Test with [Inspector](/docs/tools/inspector)\n   * Review configuration\n   * Verify environment\n\n2. **Support Channels**\n\n   * GitHub issues\n   * GitHub discussions\n\n3. **Providing Information**\n   * Log excerpts\n   * Configuration files\n   * Steps to reproduce\n   * Environment details\n\n## Next steps\n\n<CardGroup cols={2}>\n  <Card title=\"MCP Inspector\" icon=\"magnifying-glass\" href=\"/docs/tools/inspector\">\n    Learn to use the MCP Inspector\n  </Card>\n</CardGroup>\n\n\n# Inspector\nSource: https://modelcontextprotocol.io/docs/tools/inspector\n\nIn-depth guide to using the MCP Inspector for testing and debugging Model Context Protocol servers\n\nThe [MCP Inspector](https://github.com/modelcontextprotocol/inspector) is an interactive developer tool for testing and debugging MCP servers. 
While the [Debugging Guide](/docs/tools/debugging) covers the Inspector as part of the overall debugging toolkit, this document provides a detailed exploration of the Inspector's features and capabilities.\n\n## Getting started\n\n### Installation and basic usage\n\nThe Inspector runs directly through `npx` without requiring installation:\n\n```bash\nnpx @modelcontextprotocol/inspector <command>\n```\n\n```bash\nnpx @modelcontextprotocol/inspector <command> <arg1> <arg2>\n```\n\n#### Inspecting servers from NPM or PyPI\n\nA common way to start server packages is from [NPM](https://npmjs.com) or [PyPI](https://pypi.org):\n\n<Tabs>\n  <Tab title=\"NPM package\">\n    ```bash\n    npx -y @modelcontextprotocol/inspector npx <package-name> <args>\n    # For example\n    npx -y @modelcontextprotocol/inspector npx @modelcontextprotocol/server-filesystem /Users/username/Desktop\n    ```\n  </Tab>\n\n  <Tab title=\"PyPI package\">\n    ```bash\n    npx @modelcontextprotocol/inspector uvx <package-name> <args>\n    # For example\n    npx @modelcontextprotocol/inspector uvx mcp-server-git --repository ~/code/mcp/servers.git\n    ```\n  </Tab>\n</Tabs>\n\n#### Inspecting locally developed servers\n\nTo inspect servers locally developed or downloaded as a repository, the most common\nway is:\n\n<Tabs>\n  <Tab title=\"TypeScript\">\n    ```bash\n    npx @modelcontextprotocol/inspector node path/to/server/index.js args...\n    ```\n  </Tab>\n\n  <Tab title=\"Python\">\n    ```bash\n    npx @modelcontextprotocol/inspector \\\n      uv \\\n      --directory path/to/server \\\n      run \\\n      package-name \\\n      args...\n    ```\n  </Tab>\n</Tabs>\n\nPlease carefully read any attached README for the most accurate instructions.\n\n## Feature overview\n\n<Frame caption=\"The MCP Inspector interface\">\n  <img src=\"https://mintlify.s3.us-west-1.amazonaws.com/mcp/images/mcp-inspector.png\" />\n</Frame>\n\nThe Inspector provides several features for interacting with your MCP 
server:\n\n### Server connection pane\n\n* Allows selecting the [transport](/docs/concepts/transports) for connecting to the server\n* For local servers, supports customizing the command-line arguments and environment\n\n### Resources tab\n\n* Lists all available resources\n* Shows resource metadata (MIME types, descriptions)\n* Allows resource content inspection\n* Supports subscription testing\n\n### Prompts tab\n\n* Displays available prompt templates\n* Shows prompt arguments and descriptions\n* Enables prompt testing with custom arguments\n* Previews generated messages\n\n### Tools tab\n\n* Lists available tools\n* Shows tool schemas and descriptions\n* Enables tool testing with custom inputs\n* Displays tool execution results\n\n### Notifications pane\n\n* Presents all logs recorded from the server\n* Shows notifications received from the server\n\n## Best practices\n\n### Development workflow\n\n1. Start Development\n\n   * Launch Inspector with your server\n   * Verify basic connectivity\n   * Check capability negotiation\n\n2. Iterative testing\n\n   * Make server changes\n   * Rebuild the server\n   * Reconnect the Inspector\n   * Test affected features\n   * Monitor messages\n\n3. Test edge cases\n   * Invalid inputs\n   * Missing prompt arguments\n   * Concurrent operations\n   * Verify error handling and error responses\n\n## Next steps\n\n<CardGroup cols={2}>\n  <Card title=\"Inspector Repository\" icon=\"github\" href=\"https://github.com/modelcontextprotocol/inspector\">\n    Check out the MCP Inspector source code\n  </Card>\n\n  <Card title=\"Debugging Guide\" icon=\"bug\" href=\"/docs/tools/debugging\">\n    Learn about broader debugging strategies\n  </Card>\n</CardGroup>\n\n\n# Example Servers\nSource: https://modelcontextprotocol.io/examples\n\nA list of example servers and implementations\n\nThis page showcases various Model Context Protocol (MCP) servers that demonstrate the protocol's capabilities and versatility. 
These servers enable Large Language Models (LLMs) to securely access tools and data sources.\n\n## Reference implementations\n\nThese official reference servers demonstrate core MCP features and SDK usage:\n\n### Current reference servers\n\n* **[Filesystem](https://github.com/modelcontextprotocol/servers/tree/main/src/filesystem)** - Secure file operations with configurable access controls\n* **[Fetch](https://github.com/modelcontextprotocol/servers/tree/main/src/fetch)** - Web content fetching and conversion optimized for LLM usage\n* **[Memory](https://github.com/modelcontextprotocol/servers/tree/main/src/memory)** - Knowledge graph-based persistent memory system\n* **[Sequential Thinking](https://github.com/modelcontextprotocol/servers/tree/main/src/sequentialthinking)** - Dynamic problem-solving through thought sequences\n\n### Archived servers (historical reference)\n\n⚠️ **Note**: The following servers have been moved to the [servers-archived repository](https://github.com/modelcontextprotocol/servers-archived) and are no longer actively maintained. 
They are provided for historical reference only.\n\n#### Data and file systems\n\n* **[PostgreSQL](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/postgres)** - Read-only database access with schema inspection capabilities\n* **[SQLite](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/sqlite)** - Database interaction and business intelligence features\n* **[Google Drive](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/gdrive)** - File access and search capabilities for Google Drive\n\n#### Development tools\n\n* **[Git](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/git)** - Tools to read, search, and manipulate Git repositories\n* **[GitHub](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/github)** - Repository management, file operations, and GitHub API integration\n* **[GitLab](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/gitlab)** - GitLab API integration enabling project management\n* **[Sentry](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/sentry)** - Retrieving and analyzing issues from Sentry.io\n\n#### Web and browser automation\n\n* **[Brave Search](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/brave-search)** - Web and local search using Brave's Search API\n* **[Puppeteer](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/puppeteer)** - Browser automation and web scraping capabilities\n\n#### Productivity and communication\n\n* **[Slack](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/slack)** - Channel management and messaging capabilities\n* **[Google Maps](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/google-maps)** - Location services, directions, and place details\n\n#### AI and specialized tools\n\n* **[EverArt](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/everart)** 
- AI image generation using various models\n* **[AWS KB Retrieval](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/aws-kb-retrieval-server)** - Retrieval from AWS Knowledge Base using Bedrock Agent Runtime\n\n## Official integrations\n\nVisit the [MCP Servers Repository (Official Integrations section)](https://github.com/modelcontextprotocol/servers?tab=readme-ov-file#%EF%B8%8F-official-integrations) for a list of MCP servers maintained by companies for their platforms.\n\n## Community implementations\n\nVisit the [MCP Servers Repository (Community section)](https://github.com/modelcontextprotocol/servers?tab=readme-ov-file#-community-servers) for a list of MCP servers maintained by community members.\n\n## Getting started\n\n### Using reference servers\n\nTypeScript-based servers can be used directly with `npx`:\n\n```bash\nnpx -y @modelcontextprotocol/server-memory\n```\n\nPython-based servers can be used with `uvx` (recommended) or `pip`:\n\n```bash\n# Using uvx\nuvx mcp-server-git\n\n# Using pip\npip install mcp-server-git\npython -m mcp_server_git\n```\n\n### Configuring with Claude\n\nTo use an MCP server with Claude, add it to your configuration:\n\n```json\n{\n  \"mcpServers\": {\n    \"memory\": {\n      \"command\": \"npx\",\n      \"args\": [\"-y\", \"@modelcontextprotocol/server-memory\"]\n    },\n    \"filesystem\": {\n      \"command\": \"npx\",\n      \"args\": [\n        \"-y\",\n        \"@modelcontextprotocol/server-filesystem\",\n        \"/path/to/allowed/files\"\n      ]\n    },\n    \"github\": {\n      \"command\": \"npx\",\n      \"args\": [\"-y\", \"@modelcontextprotocol/server-github\"],\n      \"env\": {\n        \"GITHUB_PERSONAL_ACCESS_TOKEN\": \"<YOUR_TOKEN>\"\n      }\n    }\n  }\n}\n```\n\n## Additional resources\n\nVisit the [MCP Servers Repository (Resources section)](https://github.com/modelcontextprotocol/servers?tab=readme-ov-file#-resources) for a collection of other resources and projects related to 
MCP.\n\nVisit our [GitHub Discussions](https://github.com/orgs/modelcontextprotocol/discussions) to engage with the MCP community.\n\n\n# FAQs\nSource: https://modelcontextprotocol.io/faqs\n\nExplaining MCP and why it matters in simple terms\n\n## What is MCP?\n\nMCP (Model Context Protocol) is a standard way for AI applications and agents to connect to and work with your data sources (e.g. local files, databases, or content repositories) and tools (e.g. GitHub, Google Maps, or Puppeteer).\n\nThink of MCP as a universal adapter for AI applications, similar to what USB-C is for physical devices. USB-C acts as a universal adapter to connect devices to various peripherals and accessories. Similarly, MCP provides a standardized way to connect AI applications to different data and tools.\n\nBefore USB-C, you needed different cables for different connections. Similarly, before MCP, developers had to build custom connections to each data source or tool they wanted their AI application to work with—a time-consuming process that often resulted in limited functionality. Now, with MCP, developers can easily add connections to their AI applications, making their applications much more powerful from day one.\n\n## Why does MCP matter?\n\n### For AI application users\n\nMCP means your AI applications can access the information and tools you work with every day, making them much more helpful. 
Rather than AI being limited to what it already knows about, it can now understand your specific documents, data, and work context.\n\nFor example, by using MCP servers, applications can access your personal documents from Google Drive or data about your codebase from GitHub, providing more personalized and contextually relevant assistance.\n\nImagine asking an AI assistant: \"Summarize last week's team meeting notes and schedule follow-ups with everyone.\"\n\nBy using connections to data sources powered by MCP, the AI assistant can:\n\n* Connect to your Google Drive through an MCP server to read meeting notes\n* Understand who needs follow-ups based on the notes\n* Connect to your calendar through another MCP server to schedule the meetings automatically\n\n### For developers\n\nMCP reduces development time and complexity when building AI applications that need to access various data sources. With MCP, developers can focus on building great AI experiences rather than repeatedly creating custom connectors.\n\nTraditionally, connecting applications with data sources required building custom, one-off connections for each data source and each application. This created significant duplicative work—every developer wanting to connect their AI application to Google Drive or Slack needed to build their own connection.\n\nMCP simplifies this by enabling developers to build MCP servers for data sources that are then reusable by various applications. 
For example, using the open source Google Drive MCP server, many different applications can access data from Google Drive without each developer needing to build a custom connection.\n\nThis open source ecosystem of MCP servers means developers can leverage existing work rather than starting from scratch, making it easier to build powerful AI applications that seamlessly integrate with the tools and data sources their users already rely on.\n\n## How does MCP work?\n\n<Frame>\n  <img src=\"https://mintlify.s3.us-west-1.amazonaws.com/mcp/images/mcp-simple-diagram.png\" />\n</Frame>\n\nMCP creates a bridge between your AI applications and your data through a straightforward system:\n\n* **MCP servers** connect to your data sources and tools (like Google Drive or Slack)\n* **MCP clients** are run by AI applications (like Claude Desktop) to connect them to these servers\n* When you give permission, your AI application discovers available MCP servers\n* The AI model can then use these connections to read information and take actions\n\nThis modular system means new capabilities can be added without changing AI applications themselves—just like adding new accessories to your computer without upgrading your entire system.\n\n## Who creates and maintains MCP servers?\n\nMCP servers are developed and maintained by:\n\n* Developers at Anthropic who build servers for common tools and data sources\n* Open source contributors who create servers for tools they use\n* Enterprise development teams building servers for their internal systems\n* Software providers making their applications AI-ready\n\nOnce an open source MCP server is created for a data source, it can be used by any MCP-compatible AI application, creating a growing ecosystem of connections. 
See our [list of example servers](https://modelcontextprotocol.io/examples), or [get started building your own server](https://modelcontextprotocol.io/quickstart/server).\n\n\n# Introduction\nSource: https://modelcontextprotocol.io/introduction\n\nGet started with the Model Context Protocol (MCP)\n\nMCP is an open protocol that standardizes how applications provide context to LLMs. Think of MCP like a USB-C port for AI applications. Just as USB-C provides a standardized way to connect your devices to various peripherals and accessories, MCP provides a standardized way to connect AI models to different data sources and tools.\n\n## Why MCP?\n\nMCP helps you build agents and complex workflows on top of LLMs. LLMs frequently need to integrate with data and tools, and MCP provides:\n\n* A growing list of pre-built integrations that your LLM can directly plug into\n* The flexibility to switch between LLM providers and vendors\n* Best practices for securing your data within your infrastructure\n\n### General architecture\n\nAt its core, MCP follows a client-server architecture where a host application can connect to multiple servers:\n\n```mermaid\nflowchart LR\n    subgraph \"Your Computer\"\n        Host[\"Host with MCP Client\\n(Claude, IDEs, Tools)\"]\n        S1[\"MCP Server A\"]\n        S2[\"MCP Server B\"]\n        S3[\"MCP Server C\"]\n        Host <-->|\"MCP Protocol\"| S1\n        Host <-->|\"MCP Protocol\"| S2\n        Host <-->|\"MCP Protocol\"| S3\n        S1 <--> D1[(\"Local\\nData Source A\")]\n        S2 <--> D2[(\"Local\\nData Source B\")]\n    end\n    subgraph \"Internet\"\n        S3 <-->|\"Web APIs\"| D3[(\"Remote\\nService C\")]\n    end\n```\n\n* **MCP Hosts**: Programs like Claude Desktop, IDEs, or AI tools that want to access data through MCP\n* **MCP Clients**: Protocol clients that maintain 1:1 connections with servers\n* **MCP Servers**: Lightweight programs that each expose specific capabilities through the standardized Model Context 
Protocol\n* **Local Data Sources**: Your computer's files, databases, and services that MCP servers can securely access\n* **Remote Services**: External systems available over the internet (e.g., through APIs) that MCP servers can connect to\n\n## Get started\n\nChoose the path that best fits your needs:\n\n### Quick Starts\n\n<CardGroup cols={2}>\n  <Card title=\"For Server Developers\" icon=\"bolt\" href=\"/quickstart/server\">\n    Get started building your own server to use in Claude for Desktop and other\n    clients\n  </Card>\n\n  <Card title=\"For Client Developers\" icon=\"bolt\" href=\"/quickstart/client\">\n    Get started building your own client that can integrate with all MCP servers\n  </Card>\n\n  <Card title=\"For Claude Desktop Users\" icon=\"bolt\" href=\"/quickstart/user\">\n    Get started using pre-built servers in Claude for Desktop\n  </Card>\n</CardGroup>\n\n### Examples\n\n<CardGroup cols={2}>\n  <Card title=\"Example Servers\" icon=\"grid\" href=\"/examples\">\n    Check out our gallery of official MCP servers and implementations\n  </Card>\n\n  <Card title=\"Example Clients\" icon=\"cubes\" href=\"/clients\">\n    View the list of clients that support MCP integrations\n  </Card>\n</CardGroup>\n\n## Tutorials\n\n<CardGroup cols={2}>\n  <Card title=\"Building MCP with LLMs\" icon=\"comments\" href=\"/tutorials/building-mcp-with-llms\">\n    Learn how to use LLMs like Claude to speed up your MCP development\n  </Card>\n\n  <Card title=\"Debugging Guide\" icon=\"bug\" href=\"/docs/tools/debugging\">\n    Learn how to effectively debug MCP servers and integrations\n  </Card>\n\n  <Card title=\"MCP Inspector\" icon=\"magnifying-glass\" href=\"/docs/tools/inspector\">\n    Test and inspect your MCP servers with our interactive debugging tool\n  </Card>\n\n  <Card title=\"MCP Workshop (Video, 2hr)\" icon=\"person-chalkboard\" href=\"https://www.youtube.com/watch?v=kQmXtrmQ5Zg\">\n    <iframe src=\"https://www.youtube.com/embed/kQmXtrmQ5Zg\" />\n 
 </Card>\n</CardGroup>\n\n## Explore MCP\n\nDive deeper into MCP's core concepts and capabilities:\n\n<CardGroup cols={2}>\n  <Card title=\"Core architecture\" icon=\"sitemap\" href=\"/docs/concepts/architecture\">\n    Understand how MCP connects clients, servers, and LLMs\n  </Card>\n\n  <Card title=\"Resources\" icon=\"database\" href=\"/docs/concepts/resources\">\n    Expose data and content from your servers to LLMs\n  </Card>\n\n  <Card title=\"Prompts\" icon=\"message\" href=\"/docs/concepts/prompts\">\n    Create reusable prompt templates and workflows\n  </Card>\n\n  <Card title=\"Tools\" icon=\"wrench\" href=\"/docs/concepts/tools\">\n    Enable LLMs to perform actions through your server\n  </Card>\n\n  <Card title=\"Sampling\" icon=\"robot\" href=\"/docs/concepts/sampling\">\n    Let your servers request completions from LLMs\n  </Card>\n\n  <Card title=\"Transports\" icon=\"network-wired\" href=\"/docs/concepts/transports\">\n    Learn about MCP's communication mechanism\n  </Card>\n</CardGroup>\n\n## Contributing\n\nWant to contribute? 
Check out our [Contributing Guide](/development/contributing) to learn how you can help improve MCP.\n\n## Support and Feedback\n\nHere's how to get help or provide feedback:\n\n* For bug reports and feature requests related to the MCP specification, SDKs, or documentation (open source), please [create a GitHub issue](https://github.com/modelcontextprotocol)\n* For discussions or Q\\&A about the MCP specification, use the [specification discussions](https://github.com/modelcontextprotocol/specification/discussions)\n* For discussions or Q\\&A about other MCP open source components, use the [organization discussions](https://github.com/orgs/modelcontextprotocol/discussions)\n* For bug reports, feature requests, and questions related to Claude.app and claude.ai's MCP integration, please see Anthropic's guide on [How to Get Support](https://support.anthropic.com/en/articles/9015913-how-to-get-support)\n\n\n# C# SDK\nSource: https://modelcontextprotocol.io/links/sdks/csharp\n\n\n\n<Card title=\"C# SDK\" href=\"https://github.com/modelcontextprotocol/csharp-sdk\" />\n\n\n# Java SDK\nSource: https://modelcontextprotocol.io/links/sdks/java\n\n\n\n<Card title=\"Java SDK\" href=\"https://github.com/modelcontextprotocol/java-sdk\" />\n\n\n# Kotlin SDK\nSource: https://modelcontextprotocol.io/links/sdks/kotlin\n\n\n\n<Card title=\"Kotlin SDK\" href=\"https://github.com/modelcontextprotocol/kotlin-sdk\" />\n\n\n# Python SDK\nSource: https://modelcontextprotocol.io/links/sdks/python\n\n\n\n<Card title=\"Python SDK\" href=\"https://github.com/modelcontextprotocol/python-sdk\" />\n\n\n# Ruby SDK\nSource: https://modelcontextprotocol.io/links/sdks/ruby\n\n\n\n<Card title=\"Ruby SDK\" href=\"https://github.com/modelcontextprotocol/ruby-sdk\" />\n\n\n# Swift SDK\nSource: https://modelcontextprotocol.io/links/sdks/swift\n\n\n\n<Card title=\"Swift SDK\" href=\"https://github.com/modelcontextprotocol/swift-sdk\" />\n\n\n# TypeScript SDK\nSource: 
https://modelcontextprotocol.io/links/sdks/typescript\n\n\n\n<Card title=\"TypeScript SDK\" href=\"https://github.com/modelcontextprotocol/typescript-sdk\" />\n\n\n# For Client Developers\nSource: https://modelcontextprotocol.io/quickstart/client\n\nGet started building your own client that can integrate with all MCP servers.\n\nIn this tutorial, you'll learn how to build an LLM-powered chatbot client that connects to MCP servers. It helps to have gone through the [Server quickstart](/quickstart/server) that guides you through the basics of building your first server.\n\n<Tabs>\n  <Tab title=\"Python\">\n    [You can find the complete code for this tutorial here.](https://github.com/modelcontextprotocol/quickstart-resources/tree/main/mcp-client-python)\n\n    ## System Requirements\n\n    Before starting, ensure your system meets these requirements:\n\n    * Mac or Windows computer\n    * Latest Python version installed\n    * Latest version of `uv` installed\n\n    ## Setting Up Your Environment\n\n    First, create a new Python project with `uv`:\n\n    ```bash\n    # Create project directory\n    uv init mcp-client\n    cd mcp-client\n\n    # Create virtual environment\n    uv venv\n\n    # Activate virtual environment\n    # On Windows:\n    .venv\\Scripts\\activate\n    # On Unix or MacOS:\n    source .venv/bin/activate\n\n    # Install required packages\n    uv add mcp anthropic python-dotenv\n\n    # Remove boilerplate files\n    # On Windows:\n    del main.py\n    # On Unix or MacOS:\n    rm main.py\n\n    # Create our main file\n    touch client.py\n    ```\n\n    ## Setting Up Your API Key\n\n    You'll need an Anthropic API key from the [Anthropic Console](https://console.anthropic.com/settings/keys).\n\n    Create a `.env` file to store it:\n\n    ```bash\n    # Create .env file\n    touch .env\n    ```\n\n    Add your key to the `.env` file:\n\n    ```bash\n    ANTHROPIC_API_KEY=<your key here>\n    ```\n\n    Add `.env` to your `.gitignore`:\n\n    
```bash\n    echo \".env\" >> .gitignore\n    ```\n\n    <Warning>\n      Make sure you keep your `ANTHROPIC_API_KEY` secure!\n    </Warning>\n\n    ## Creating the Client\n\n    ### Basic Client Structure\n\n    First, let's set up our imports and create the basic client class:\n\n    ```python\n    import asyncio\n    from typing import Optional\n    from contextlib import AsyncExitStack\n\n    from mcp import ClientSession, StdioServerParameters\n    from mcp.client.stdio import stdio_client\n\n    from anthropic import Anthropic\n    from dotenv import load_dotenv\n\n    load_dotenv()  # load environment variables from .env\n\n    class MCPClient:\n        def __init__(self):\n            # Initialize session and client objects\n            self.session: Optional[ClientSession] = None\n            self.exit_stack = AsyncExitStack()\n            self.anthropic = Anthropic()\n        # methods will go here\n    ```\n\n    ### Server Connection Management\n\n    Next, we'll implement the method to connect to an MCP server:\n\n    ```python\n    async def connect_to_server(self, server_script_path: str):\n        \"\"\"Connect to an MCP server\n\n        Args:\n            server_script_path: Path to the server script (.py or .js)\n        \"\"\"\n        is_python = server_script_path.endswith('.py')\n        is_js = server_script_path.endswith('.js')\n        if not (is_python or is_js):\n            raise ValueError(\"Server script must be a .py or .js file\")\n\n        command = \"python\" if is_python else \"node\"\n        server_params = StdioServerParameters(\n            command=command,\n            args=[server_script_path],\n            env=None\n        )\n\n        stdio_transport = await self.exit_stack.enter_async_context(stdio_client(server_params))\n        self.stdio, self.write = stdio_transport\n        self.session = await self.exit_stack.enter_async_context(ClientSession(self.stdio, self.write))\n\n        await self.session.initialize()\n\n 
       # List available tools\n        response = await self.session.list_tools()\n        tools = response.tools\n        print(\"\\nConnected to server with tools:\", [tool.name for tool in tools])\n    ```\n\n    ### Query Processing Logic\n\n    Now let's add the core functionality for processing queries and handling tool calls:\n\n    ```python\n    async def process_query(self, query: str) -> str:\n        \"\"\"Process a query using Claude and available tools\"\"\"\n        messages = [\n            {\n                \"role\": \"user\",\n                \"content\": query\n            }\n        ]\n\n        response = await self.session.list_tools()\n        available_tools = [{\n            \"name\": tool.name,\n            \"description\": tool.description,\n            \"input_schema\": tool.inputSchema\n        } for tool in response.tools]\n\n        # Initial Claude API call\n        response = self.anthropic.messages.create(\n            model=\"claude-3-5-sonnet-20241022\",\n            max_tokens=1000,\n            messages=messages,\n            tools=available_tools\n        )\n\n        # Process response and handle tool calls\n        final_text = []\n\n        assistant_message_content = []\n        for content in response.content:\n            if content.type == 'text':\n                final_text.append(content.text)\n                assistant_message_content.append(content)\n            elif content.type == 'tool_use':\n                tool_name = content.name\n                tool_args = content.input\n\n                # Execute tool call\n                result = await self.session.call_tool(tool_name, tool_args)\n                final_text.append(f\"[Calling tool {tool_name} with args {tool_args}]\")\n\n                assistant_message_content.append(content)\n                messages.append({\n                    \"role\": \"assistant\",\n                    \"content\": assistant_message_content\n                })\n                
messages.append({\n                    \"role\": \"user\",\n                    \"content\": [\n                        {\n                            \"type\": \"tool_result\",\n                            \"tool_use_id\": content.id,\n                            \"content\": result.content\n                        }\n                    ]\n                })\n\n                # Get next response from Claude\n                response = self.anthropic.messages.create(\n                    model=\"claude-3-5-sonnet-20241022\",\n                    max_tokens=1000,\n                    messages=messages,\n                    tools=available_tools\n                )\n\n                final_text.append(response.content[0].text)\n\n        return \"\\n\".join(final_text)\n    ```\n\n    ### Interactive Chat Interface\n\n    Now we'll add the chat loop and cleanup functionality:\n\n    ```python\n    async def chat_loop(self):\n        \"\"\"Run an interactive chat loop\"\"\"\n        print(\"\\nMCP Client Started!\")\n        print(\"Type your queries or 'quit' to exit.\")\n\n        while True:\n            try:\n                query = input(\"\\nQuery: \").strip()\n\n                if query.lower() == 'quit':\n                    break\n\n                response = await self.process_query(query)\n                print(\"\\n\" + response)\n\n            except Exception as e:\n                print(f\"\\nError: {str(e)}\")\n\n    async def cleanup(self):\n        \"\"\"Clean up resources\"\"\"\n        await self.exit_stack.aclose()\n    ```\n\n    ### Main Entry Point\n\n    Finally, we'll add the main execution logic:\n\n    ```python\n    async def main():\n        if len(sys.argv) < 2:\n            print(\"Usage: python client.py <path_to_server_script>\")\n            sys.exit(1)\n\n        client = MCPClient()\n        try:\n            await client.connect_to_server(sys.argv[1])\n            await client.chat_loop()\n        finally:\n            await 
client.cleanup()\n\n    if __name__ == \"__main__\":\n        import sys\n        asyncio.run(main())\n    ```\n\n    You can find the complete `client.py` file [here.](https://gist.github.com/zckly/f3f28ea731e096e53b39b47bf0a2d4b1)\n\n    ## Key Components Explained\n\n    ### 1. Client Initialization\n\n    * The `MCPClient` class initializes with session management and API clients\n    * Uses `AsyncExitStack` for proper resource management\n    * Configures the Anthropic client for Claude interactions\n\n    ### 2. Server Connection\n\n    * Supports both Python and Node.js servers\n    * Validates server script type\n    * Sets up proper communication channels\n    * Initializes the session and lists available tools\n\n    ### 3. Query Processing\n\n    * Maintains conversation context\n    * Handles Claude's responses and tool calls\n    * Manages the message flow between Claude and tools\n    * Combines results into a coherent response\n\n    ### 4. Interactive Interface\n\n    * Provides a simple command-line interface\n    * Handles user input and displays responses\n    * Includes basic error handling\n    * Allows graceful exit\n\n    ### 5. Resource Management\n\n    * Proper cleanup of resources\n    * Error handling for connection issues\n    * Graceful shutdown procedures\n\n    ## Common Customization Points\n\n    1. **Tool Handling**\n\n       * Modify `process_query()` to handle specific tool types\n       * Add custom error handling for tool calls\n       * Implement tool-specific response formatting\n\n    2. **Response Processing**\n\n       * Customize how tool results are formatted\n       * Add response filtering or transformation\n       * Implement custom logging\n\n    3. 
**User Interface**\n       * Add a GUI or web interface\n       * Implement rich console output\n       * Add command history or auto-completion\n\n    ## Running the Client\n\n    To run your client with any MCP server:\n\n    ```bash\n    uv run client.py path/to/server.py # python server\n    uv run client.py path/to/build/index.js # node server\n    ```\n\n    <Note>\n      If you're continuing the weather tutorial from the server quickstart, your command might look something like this: `python client.py .../quickstart-resources/weather-server-python/weather.py`\n    </Note>\n\n    The client will:\n\n    1. Connect to the specified server\n    2. List available tools\n    3. Start an interactive chat session where you can:\n       * Enter queries\n       * See tool executions\n       * Get responses from Claude\n\n    Here's an example of what it should look like if connected to the weather server from the server quickstart:\n\n    <Frame>\n      <img src=\"https://mintlify.s3.us-west-1.amazonaws.com/mcp/images/client-claude-cli-python.png\" />\n    </Frame>\n\n    ## How It Works\n\n    When you submit a query:\n\n    1. The client gets the list of available tools from the server\n    2. Your query is sent to Claude along with tool descriptions\n    3. Claude decides which tools (if any) to use\n    4. The client executes any requested tool calls through the server\n    5. Results are sent back to Claude\n    6. Claude provides a natural language response\n    7. The response is displayed to you\n\n    ## Best practices\n\n    1. **Error Handling**\n\n       * Always wrap tool calls in try-catch blocks\n       * Provide meaningful error messages\n       * Gracefully handle connection issues\n\n    2. **Resource Management**\n\n       * Use `AsyncExitStack` for proper cleanup\n       * Close connections when done\n       * Handle server disconnections\n\n    3. 
**Security**\n       * Store API keys securely in `.env`\n       * Validate server responses\n       * Be cautious with tool permissions\n\n    ## Troubleshooting\n\n    ### Server Path Issues\n\n    * Double-check the path to your server script is correct\n    * Use the absolute path if the relative path isn't working\n    * For Windows users, make sure to use forward slashes (/) or escaped backslashes (\\\\) in the path\n    * Verify the server file has the correct extension (.py for Python or .js for Node.js)\n\n    Example of correct path usage:\n\n    ```bash\n    # Relative path\n    uv run client.py ./server/weather.py\n\n    # Absolute path\n    uv run client.py /Users/username/projects/mcp-server/weather.py\n\n    # Windows path (either format works)\n    uv run client.py C:/projects/mcp-server/weather.py\n    uv run client.py C:\\\\projects\\\\mcp-server\\\\weather.py\n    ```\n\n    ### Response Timing\n\n    * The first response might take up to 30 seconds to return\n    * This is normal and happens while:\n      * The server initializes\n      * Claude processes the query\n      * Tools are being executed\n    * Subsequent responses are typically faster\n    * Don't interrupt the process during this initial waiting period\n\n    ### Common Error Messages\n\n    If you see:\n\n    * `FileNotFoundError`: Check your server path\n    * `Connection refused`: Ensure the server is running and the path is correct\n    * `Tool execution failed`: Verify the tool's required environment variables are set\n    * `Timeout error`: Consider increasing the timeout in your client configuration\n  </Tab>\n\n  <Tab title=\"Node\">\n    [You can find the complete code for this tutorial here.](https://github.com/modelcontextprotocol/quickstart-resources/tree/main/mcp-client-typescript)\n\n    ## System Requirements\n\n    Before starting, ensure your system meets these requirements:\n\n    * Mac or Windows computer\n    * Node.js 17 or higher installed\n    * Latest version 
of `npm` installed\n    * Anthropic API key (Claude)\n\n    ## Setting Up Your Environment\n\n    First, let's create and set up our project:\n\n    <CodeGroup>\n      ```bash MacOS/Linux\n      # Create project directory\n      mkdir mcp-client-typescript\n      cd mcp-client-typescript\n\n      # Initialize npm project\n      npm init -y\n\n      # Install dependencies\n      npm install @anthropic-ai/sdk @modelcontextprotocol/sdk dotenv\n\n      # Install dev dependencies\n      npm install -D @types/node typescript\n\n      # Create source file\n      touch index.ts\n      ```\n\n      ```powershell Windows\n      # Create project directory\n      md mcp-client-typescript\n      cd mcp-client-typescript\n\n      # Initialize npm project\n      npm init -y\n\n      # Install dependencies\n      npm install @anthropic-ai/sdk @modelcontextprotocol/sdk dotenv\n\n      # Install dev dependencies\n      npm install -D @types/node typescript\n\n      # Create source file\n      new-item index.ts\n      ```\n    </CodeGroup>\n\n    Update your `package.json` to set `type: \"module\"` and a build script:\n\n    ```json package.json\n    {\n      \"type\": \"module\",\n      \"scripts\": {\n        \"build\": \"tsc && chmod 755 build/index.js\"\n      }\n    }\n    ```\n\n    Create a `tsconfig.json` in the root of your project:\n\n    ```json tsconfig.json\n    {\n      \"compilerOptions\": {\n        \"target\": \"ES2022\",\n        \"module\": \"Node16\",\n        \"moduleResolution\": \"Node16\",\n        \"outDir\": \"./build\",\n        \"rootDir\": \"./\",\n        \"strict\": true,\n        \"esModuleInterop\": true,\n        \"skipLibCheck\": true,\n        \"forceConsistentCasingInFileNames\": true\n      },\n      \"include\": [\"index.ts\"],\n      \"exclude\": [\"node_modules\"]\n    }\n    ```\n\n    ## Setting Up Your API Key\n\n    You'll need an Anthropic API key from the [Anthropic Console](https://console.anthropic.com/settings/keys).\n\n    Create a 
`.env` file to store it:\n\n    ```bash\n    echo \"ANTHROPIC_API_KEY=<your key here>\" > .env\n    ```\n\n    Add `.env` to your `.gitignore`:\n\n    ```bash\n    echo \".env\" >> .gitignore\n    ```\n\n    <Warning>\n      Make sure you keep your `ANTHROPIC_API_KEY` secure!\n    </Warning>\n\n    ## Creating the Client\n\n    ### Basic Client Structure\n\n    First, let's set up our imports and create the basic client class in `index.ts`:\n\n    ```typescript\n    import { Anthropic } from \"@anthropic-ai/sdk\";\n    import {\n      MessageParam,\n      Tool,\n    } from \"@anthropic-ai/sdk/resources/messages/messages.mjs\";\n    import { Client } from \"@modelcontextprotocol/sdk/client/index.js\";\n    import { StdioClientTransport } from \"@modelcontextprotocol/sdk/client/stdio.js\";\n    import readline from \"readline/promises\";\n    import dotenv from \"dotenv\";\n\n    dotenv.config();\n\n    const ANTHROPIC_API_KEY = process.env.ANTHROPIC_API_KEY;\n    if (!ANTHROPIC_API_KEY) {\n      throw new Error(\"ANTHROPIC_API_KEY is not set\");\n    }\n\n    class MCPClient {\n      private mcp: Client;\n      private anthropic: Anthropic;\n      private transport: StdioClientTransport | null = null;\n      private tools: Tool[] = [];\n\n      constructor() {\n        this.anthropic = new Anthropic({\n          apiKey: ANTHROPIC_API_KEY,\n        });\n        this.mcp = new Client({ name: \"mcp-client-cli\", version: \"1.0.0\" });\n      }\n      // methods will go here\n    }\n    ```\n\n    ### Server Connection Management\n\n    Next, we'll implement the method to connect to an MCP server:\n\n    ```typescript\n    async connectToServer(serverScriptPath: string) {\n      try {\n        const isJs = serverScriptPath.endsWith(\".js\");\n        const isPy = serverScriptPath.endsWith(\".py\");\n        if (!isJs && !isPy) {\n          throw new Error(\"Server script must be a .js or .py file\");\n        }\n        const command = isPy\n          ? 
process.platform === \"win32\"\n            ? \"python\"\n            : \"python3\"\n          : process.execPath;\n\n        this.transport = new StdioClientTransport({\n          command,\n          args: [serverScriptPath],\n        });\n        await this.mcp.connect(this.transport);\n\n        const toolsResult = await this.mcp.listTools();\n        this.tools = toolsResult.tools.map((tool) => {\n          return {\n            name: tool.name,\n            description: tool.description,\n            input_schema: tool.inputSchema,\n          };\n        });\n        console.log(\n          \"Connected to server with tools:\",\n          this.tools.map(({ name }) => name)\n        );\n      } catch (e) {\n        console.log(\"Failed to connect to MCP server: \", e);\n        throw e;\n      }\n    }\n    ```\n\n    ### Query Processing Logic\n\n    Now let's add the core functionality for processing queries and handling tool calls:\n\n    ```typescript\n    async processQuery(query: string) {\n      const messages: MessageParam[] = [\n        {\n          role: \"user\",\n          content: query,\n        },\n      ];\n\n      const response = await this.anthropic.messages.create({\n        model: \"claude-3-5-sonnet-20241022\",\n        max_tokens: 1000,\n        messages,\n        tools: this.tools,\n      });\n\n      const finalText = [];\n\n      for (const content of response.content) {\n        if (content.type === \"text\") {\n          finalText.push(content.text);\n        } else if (content.type === \"tool_use\") {\n          const toolName = content.name;\n          const toolArgs = content.input as { [x: string]: unknown } | undefined;\n\n          const result = await this.mcp.callTool({\n            name: toolName,\n            arguments: toolArgs,\n          });\n          finalText.push(\n            `[Calling tool ${toolName} with args ${JSON.stringify(toolArgs)}]`\n          );\n\n          messages.push({\n            role: \"user\",\n    
        content: result.content as string,\n          });\n\n          const response = await this.anthropic.messages.create({\n            model: \"claude-3-5-sonnet-20241022\",\n            max_tokens: 1000,\n            messages,\n          });\n\n          finalText.push(\n            response.content[0].type === \"text\" ? response.content[0].text : \"\"\n          );\n        }\n      }\n\n      return finalText.join(\"\\n\");\n    }\n    ```\n\n    ### Interactive Chat Interface\n\n    Now we'll add the chat loop and cleanup functionality:\n\n    ```typescript\n    async chatLoop() {\n      const rl = readline.createInterface({\n        input: process.stdin,\n        output: process.stdout,\n      });\n\n      try {\n        console.log(\"\\nMCP Client Started!\");\n        console.log(\"Type your queries or 'quit' to exit.\");\n\n        while (true) {\n          const message = await rl.question(\"\\nQuery: \");\n          if (message.toLowerCase() === \"quit\") {\n            break;\n          }\n          const response = await this.processQuery(message);\n          console.log(\"\\n\" + response);\n        }\n      } finally {\n        rl.close();\n      }\n    }\n\n    async cleanup() {\n      await this.mcp.close();\n    }\n    ```\n\n    ### Main Entry Point\n\n    Finally, we'll add the main execution logic:\n\n    ```typescript\n    async function main() {\n      if (process.argv.length < 3) {\n        console.log(\"Usage: node index.ts <path_to_server_script>\");\n        return;\n      }\n      const mcpClient = new MCPClient();\n      try {\n        await mcpClient.connectToServer(process.argv[2]);\n        await mcpClient.chatLoop();\n      } finally {\n        await mcpClient.cleanup();\n        process.exit(0);\n      }\n    }\n\n    main();\n    ```\n\n    ## Running the Client\n\n    To run your client with any MCP server:\n\n    ```bash\n    # Build TypeScript\n    npm run build\n\n    # Run the client\n    node build/index.js 
path/to/server.py # python server\n    node build/index.js path/to/build/index.js # node server\n    ```\n\n    <Note>\n      If you're continuing the weather tutorial from the server quickstart, your command might look something like this: `node build/index.js .../quickstart-resources/weather-server-typescript/build/index.js`\n    </Note>\n\n    **The client will:**\n\n    1. Connect to the specified server\n    2. List available tools\n    3. Start an interactive chat session where you can:\n       * Enter queries\n       * See tool executions\n       * Get responses from Claude\n\n    ## How It Works\n\n    When you submit a query:\n\n    1. The client gets the list of available tools from the server\n    2. Your query is sent to Claude along with tool descriptions\n    3. Claude decides which tools (if any) to use\n    4. The client executes any requested tool calls through the server\n    5. Results are sent back to Claude\n    6. Claude provides a natural language response\n    7. The response is displayed to you\n\n    ## Best practices\n\n    1. **Error Handling**\n\n       * Use TypeScript's type system for better error detection\n       * Wrap tool calls in try-catch blocks\n       * Provide meaningful error messages\n       * Gracefully handle connection issues\n\n    2. 
**Security**\n       * Store API keys securely in `.env`\n       * Validate server responses\n       * Be cautious with tool permissions\n\n    ## Troubleshooting\n\n    ### Server Path Issues\n\n    * Double-check the path to your server script is correct\n    * Use the absolute path if the relative path isn't working\n    * For Windows users, make sure to use forward slashes (/) or escaped backslashes (\\\\) in the path\n    * Verify the server file has the correct extension (.js for Node.js or .py for Python)\n\n    Example of correct path usage:\n\n    ```bash\n    # Relative path\n    node build/index.js ./server/build/index.js\n\n    # Absolute path\n    node build/index.js /Users/username/projects/mcp-server/build/index.js\n\n    # Windows path (either format works)\n    node build/index.js C:/projects/mcp-server/build/index.js\n    node build/index.js C:\\\\projects\\\\mcp-server\\\\build\\\\index.js\n    ```\n\n    ### Response Timing\n\n    * The first response might take up to 30 seconds to return\n    * This is normal and happens while:\n      * The server initializes\n      * Claude processes the query\n      * Tools are being executed\n    * Subsequent responses are typically faster\n    * Don't interrupt the process during this initial waiting period\n\n    ### Common Error Messages\n\n    If you see:\n\n    * `Error: Cannot find module`: Check your build folder and ensure TypeScript compilation succeeded\n    * `Connection refused`: Ensure the server is running and the path is correct\n    * `Tool execution failed`: Verify the tool's required environment variables are set\n    * `ANTHROPIC_API_KEY is not set`: Check your .env file and environment variables\n    * `TypeError`: Ensure you're using the correct types for tool arguments\n  </Tab>\n\n  <Tab title=\"Java\">\n    <Note>\n      This is a quickstart demo based on Spring AI MCP auto-configuration and boot starters.\n      To learn how to create sync and async MCP Clients manually, consult the 
[Java SDK Client](/sdk/java/mcp-client) documentation\n    </Note>\n\n    This example demonstrates how to build an interactive chatbot that combines Spring AI's Model Context Protocol (MCP) with the [Brave Search MCP Server](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/brave-search). The application creates a conversational interface powered by Anthropic's Claude AI model that can perform internet searches through Brave Search, enabling natural language interactions with real-time web data.\n    [You can find the complete code for this tutorial here.](https://github.com/spring-projects/spring-ai-examples/tree/main/model-context-protocol/web-search/brave-chatbot)\n\n    ## System Requirements\n\n    Before starting, ensure your system meets these requirements:\n\n    * Java 17 or higher\n    * Maven 3.6+\n    * npx package manager\n    * Anthropic API key (Claude)\n    * Brave Search API key\n\n    ## Setting Up Your Environment\n\n    1. Install npx (Node Package eXecute):\n       First, make sure to install [npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm)\n       and then run:\n\n       ```bash\n       npm install -g npx\n       ```\n\n    2. Clone the repository:\n\n       ```bash\n       git clone https://github.com/spring-projects/spring-ai-examples.git\n       cd spring-ai-examples/model-context-protocol/web-search/brave-chatbot\n       ```\n\n    3. Set up your API keys:\n\n       ```bash\n       export ANTHROPIC_API_KEY='your-anthropic-api-key-here'\n       export BRAVE_API_KEY='your-brave-api-key-here'\n       ```\n\n    4. Build the application:\n\n       ```bash\n       ./mvnw clean install\n       ```\n\n    5. 
Run the application using Maven:\n       ```bash\n       ./mvnw spring-boot:run\n       ```\n\n    <Warning>\n      Make sure you keep your `ANTHROPIC_API_KEY` and `BRAVE_API_KEY` keys secure!\n    </Warning>\n\n    ## How it Works\n\n    The application integrates Spring AI with the Brave Search MCP server through several components:\n\n    ### MCP Client Configuration\n\n    1. Required dependencies in pom.xml:\n\n    ```xml\n    <dependency>\n        <groupId>org.springframework.ai</groupId>\n        <artifactId>spring-ai-starter-mcp-client</artifactId>\n    </dependency>\n    <dependency>\n        <groupId>org.springframework.ai</groupId>\n        <artifactId>spring-ai-starter-model-anthropic</artifactId>\n    </dependency>\n    ```\n\n    2. Application properties (application.yml):\n\n    ```yml\n    spring:\n      ai:\n        mcp:\n          client:\n            enabled: true\n            name: brave-search-client\n            version: 1.0.0\n            type: SYNC\n            request-timeout: 20s\n            stdio:\n              root-change-notification: true\n              servers-configuration: classpath:/mcp-servers-config.json\n            toolcallback:\n              enabled: true\n        anthropic:\n          api-key: ${ANTHROPIC_API_KEY}\n    ```\n\n    This activates the `spring-ai-starter-mcp-client` to create one or more `McpClient`s based on the provided server configuration.\n    The `spring.ai.mcp.client.toolcallback.enabled=true` property enables the tool callback mechanism that automatically registers all MCP tools as Spring AI tools.\n    It is disabled by default.\n\n    3. 
MCP Server Configuration (`mcp-servers-config.json`):\n\n    ```json\n    {\n      \"mcpServers\": {\n        \"brave-search\": {\n          \"command\": \"npx\",\n          \"args\": [\"-y\", \"@modelcontextprotocol/server-brave-search\"],\n          \"env\": {\n            \"BRAVE_API_KEY\": \"<PUT YOUR BRAVE API KEY>\"\n          }\n        }\n      }\n    }\n    ```\n\n    ### Chat Implementation\n\n    The chatbot is implemented using Spring AI's ChatClient with MCP tool integration:\n\n    ```java\n    var chatClient = chatClientBuilder\n        .defaultSystem(\"You are useful assistant, expert in AI and Java.\")\n        .defaultToolCallbacks((Object[]) mcpToolAdapter.toolCallbacks())\n        .defaultAdvisors(new MessageChatMemoryAdvisor(new InMemoryChatMemory()))\n        .build();\n    ```\n\n    <Warning>\n      Breaking change: From SpringAI 1.0.0-M8 onwards, use `.defaultToolCallbacks(...)` instead of `.defaultTool(...)` to register MCP tools.\n    </Warning>\n\n    Key features:\n\n    * Uses Claude AI model for natural language understanding\n    * Integrates Brave Search through MCP for real-time web search capabilities\n    * Maintains conversation memory using InMemoryChatMemory\n    * Runs as an interactive command-line application\n\n    ### Build and run\n\n    ```bash\n    ./mvnw clean install\n    java -jar ./target/ai-mcp-brave-chatbot-0.0.1-SNAPSHOT.jar\n    ```\n\n    or\n\n    ```bash\n    ./mvnw spring-boot:run\n    ```\n\n    The application will start an interactive chat session where you can ask questions. 
The chatbot will use Brave Search when it needs to find information from the internet to answer your queries.\n\n    The chatbot can:\n\n    * Answer questions using its built-in knowledge\n    * Perform web searches when needed using Brave Search\n    * Remember context from previous messages in the conversation\n    * Combine information from multiple sources to provide comprehensive answers\n\n    ### Advanced Configuration\n\n    The MCP client supports additional configuration options:\n\n    * Client customization through `McpSyncClientCustomizer` or `McpAsyncClientCustomizer`\n    * Multiple clients with multiple transport types: `STDIO` and `SSE` (Server-Sent Events)\n    * Integration with Spring AI's tool execution framework\n    * Automatic client initialization and lifecycle management\n\n    For WebFlux-based applications, you can use the WebFlux starter instead:\n\n    ```xml\n    <dependency>\n        <groupId>org.springframework.ai</groupId>\n        <artifactId>spring-ai-mcp-client-webflux-spring-boot-starter</artifactId>\n    </dependency>\n    ```\n\n    This provides similar functionality but uses a WebFlux-based SSE transport implementation, recommended for production deployments.\n  </Tab>\n\n  <Tab title=\"Kotlin\">\n    [You can find the complete code for this tutorial here.](https://github.com/modelcontextprotocol/kotlin-sdk/tree/main/samples/kotlin-mcp-client)\n\n    ## System Requirements\n\n    Before starting, ensure your system meets these requirements:\n\n    * Java 17 or higher\n    * Anthropic API key (Claude)\n\n    ## Setting up your environment\n\n    First, let's install `java` and `gradle` if you haven't already.\n    You can download `java` from [official Oracle JDK website](https://www.oracle.com/java/technologies/downloads/).\n    Verify your `java` installation:\n\n    ```bash\n    java --version\n    ```\n\n    Now, let's create and set up your project:\n\n    <CodeGroup>\n      ```bash MacOS/Linux\n      # Create a new 
directory for our project\n      mkdir kotlin-mcp-client\n      cd kotlin-mcp-client\n\n      # Initialize a new kotlin project\n      gradle init\n      ```\n\n      ```powershell Windows\n      # Create a new directory for our project\n      md kotlin-mcp-client\n      cd kotlin-mcp-client\n      # Initialize a new kotlin project\n      gradle init\n      ```\n    </CodeGroup>\n\n    After running `gradle init`, you will be presented with options for creating your project.\n    Select **Application** as the project type, **Kotlin** as the programming language, and **Java 17** as the Java version.\n\n    Alternatively, you can create a Kotlin application using the [IntelliJ IDEA project wizard](https://kotlinlang.org/docs/jvm-get-started.html).\n\n    After creating the project, add the following dependencies:\n\n    <CodeGroup>\n      ```kotlin build.gradle.kts\n      val mcpVersion = \"0.4.0\"\n      val slf4jVersion = \"2.0.9\"\n      val anthropicVersion = \"0.8.0\"\n\n      dependencies {\n          implementation(\"io.modelcontextprotocol:kotlin-sdk:$mcpVersion\")\n          implementation(\"org.slf4j:slf4j-nop:$slf4jVersion\")\n          implementation(\"com.anthropic:anthropic-java:$anthropicVersion\")\n      }\n      ```\n\n      ```groovy build.gradle\n      def mcpVersion = '0.3.0'\n      def slf4jVersion = '2.0.9'\n      def anthropicVersion = '0.8.0'\n      dependencies {\n          implementation \"io.modelcontextprotocol:kotlin-sdk:$mcpVersion\"\n          implementation \"org.slf4j:slf4j-nop:$slf4jVersion\"\n          implementation \"com.anthropic:anthropic-java:$anthropicVersion\"\n      }\n      ```\n    </CodeGroup>\n\n    Also, add the following plugins to your build script:\n\n    <CodeGroup>\n      ```kotlin build.gradle.kts\n      plugins {\n          id(\"com.github.johnrengelman.shadow\") version \"8.1.1\"\n      }\n      ```\n\n      ```groovy build.gradle\n      plugins {\n          id 'com.github.johnrengelman.shadow' version '8.1.1'\n 
     }\n      ```\n    </CodeGroup>\n\n    ## Setting up your API key\n\n    You'll need an Anthropic API key from the [Anthropic Console](https://console.anthropic.com/settings/keys).\n\n    Set up your API key:\n\n    ```bash\n    export ANTHROPIC_API_KEY='your-anthropic-api-key-here'\n    ```\n\n    <Warning>\n      Make sure you keep your `ANTHROPIC_API_KEY` secure!\n    </Warning>\n\n    ## Creating the Client\n\n    ### Basic Client Structure\n\n    First, let's create the basic client class:\n\n    ```kotlin\n    class MCPClient : AutoCloseable {\n        private val anthropic = AnthropicOkHttpClient.fromEnv()\n        private val mcp: Client = Client(clientInfo = Implementation(name = \"mcp-client-cli\", version = \"1.0.0\"))\n        private lateinit var tools: List<ToolUnion>\n\n        // methods will go here\n\n        override fun close() {\n            runBlocking {\n                mcp.close()\n                anthropic.close()\n            }\n        }\n    ```\n\n    ### Server connection management\n\n    Next, we'll implement the method to connect to an MCP server:\n\n    ```kotlin\n    suspend fun connectToServer(serverScriptPath: String) {\n        try {\n            val command = buildList {\n                when (serverScriptPath.substringAfterLast(\".\")) {\n                    \"js\" -> add(\"node\")\n                    \"py\" -> add(if (System.getProperty(\"os.name\").lowercase().contains(\"win\")) \"python\" else \"python3\")\n                    \"jar\" -> addAll(listOf(\"java\", \"-jar\"))\n                    else -> throw IllegalArgumentException(\"Server script must be a .js, .py or .jar file\")\n                }\n                add(serverScriptPath)\n            }\n\n            val process = ProcessBuilder(command).start()\n            val transport = StdioClientTransport(\n                input = process.inputStream.asSource().buffered(),\n                output = process.outputStream.asSink().buffered()\n            )\n\n     
       mcp.connect(transport)\n\n            val toolsResult = mcp.listTools()\n            tools = toolsResult?.tools?.map { tool ->\n                ToolUnion.ofTool(\n                    Tool.builder()\n                        .name(tool.name)\n                        .description(tool.description ?: \"\")\n                        .inputSchema(\n                            Tool.InputSchema.builder()\n                                .type(JsonValue.from(tool.inputSchema.type))\n                                .properties(tool.inputSchema.properties.toJsonValue())\n                                .putAdditionalProperty(\"required\", JsonValue.from(tool.inputSchema.required))\n                                .build()\n                        )\n                        .build()\n                )\n            } ?: emptyList()\n            println(\"Connected to server with tools: ${tools.joinToString(\", \") { it.tool().get().name() }}\")\n        } catch (e: Exception) {\n            println(\"Failed to connect to MCP server: $e\")\n            throw e\n        }\n    }\n    ```\n\n    Also create a helper function to convert from `JsonObject` to `JsonValue` for Anthropic:\n\n    ```kotlin\n    private fun JsonObject.toJsonValue(): JsonValue {\n        val mapper = ObjectMapper()\n        val node = mapper.readTree(this.toString())\n        return JsonValue.fromJsonNode(node)\n    }\n    ```\n\n    ### Query processing logic\n\n    Now let's add the core functionality for processing queries and handling tool calls:\n\n    ```kotlin\n    private val messageParamsBuilder: MessageCreateParams.Builder = MessageCreateParams.builder()\n        .model(Model.CLAUDE_3_5_SONNET_20241022)\n        .maxTokens(1024)\n\n    suspend fun processQuery(query: String): String {\n        val messages = mutableListOf(\n            MessageParam.builder()\n                .role(MessageParam.Role.USER)\n                .content(query)\n                .build()\n        )\n\n        val 
response = anthropic.messages().create(\n            messageParamsBuilder\n                .messages(messages)\n                .tools(tools)\n                .build()\n        )\n\n        val finalText = mutableListOf<String>()\n        response.content().forEach { content ->\n            when {\n                content.isText() -> finalText.add(content.text().getOrNull()?.text() ?: \"\")\n\n                content.isToolUse() -> {\n                    val toolName = content.toolUse().get().name()\n                    val toolArgs =\n                        content.toolUse().get()._input().convert(object : TypeReference<Map<String, JsonValue>>() {})\n\n                    val result = mcp.callTool(\n                        name = toolName,\n                        arguments = toolArgs ?: emptyMap()\n                    )\n                    finalText.add(\"[Calling tool $toolName with args $toolArgs]\")\n\n                    messages.add(\n                        MessageParam.builder()\n                            .role(MessageParam.Role.USER)\n                            .content(\n                                \"\"\"\n                                    \"type\": \"tool_result\",\n                                    \"tool_name\": $toolName,\n                                    \"result\": ${result?.content?.joinToString(\"\\n\") { (it as TextContent).text ?: \"\" }}\n                                \"\"\".trimIndent()\n                            )\n                            .build()\n                    )\n\n                    val aiResponse = anthropic.messages().create(\n                        messageParamsBuilder\n                            .messages(messages)\n                            .build()\n                    )\n\n                    finalText.add(aiResponse.content().first().text().getOrNull()?.text() ?: \"\")\n                }\n            }\n        }\n\n        return finalText.joinToString(\"\\n\", prefix = \"\", postfix = \"\")\n   
 }\n    ```\n\n    ### Interactive chat\n\n    We'll add the chat loop:\n\n    ```kotlin\n    suspend fun chatLoop() {\n        println(\"\\nMCP Client Started!\")\n        println(\"Type your queries or 'quit' to exit.\")\n\n        while (true) {\n            print(\"\\nQuery: \")\n            val message = readLine() ?: break\n            if (message.lowercase() == \"quit\") break\n            val response = processQuery(message)\n            println(\"\\n$response\")\n        }\n    }\n    ```\n\n    ### Main entry point\n\n    Finally, we'll add the main execution function:\n\n    ```kotlin\n    fun main(args: Array<String>) = runBlocking {\n        if (args.isEmpty()) throw IllegalArgumentException(\"Usage: java -jar <your_path>/build/libs/kotlin-mcp-client-0.1.0-all.jar <path_to_server_script>\")\n        val serverPath = args.first()\n        val client = MCPClient()\n        client.use {\n            client.connectToServer(serverPath)\n            client.chatLoop()\n        }\n    }\n    ```\n\n    ## Running the client\n\n    To run your client with any MCP server:\n\n    ```bash\n    ./gradlew build\n\n    # Run the client\n    java -jar build/libs/<your-jar-name>.jar path/to/server.jar # jvm server\n    java -jar build/libs/<your-jar-name>.jar path/to/server.py # python server\n    java -jar build/libs/<your-jar-name>.jar path/to/build/index.js # node server\n    ```\n\n    <Note>\n      If you're continuing the weather tutorial from the server quickstart, your command might look something like this: `java -jar build/libs/kotlin-mcp-client-0.1.0-all.jar .../samples/weather-stdio-server/build/libs/weather-stdio-server-0.1.0-all.jar`\n    </Note>\n\n    **The client will:**\n\n    1. Connect to the specified server\n    2. List available tools\n    3. 
Start an interactive chat session where you can:\n       * Enter queries\n       * See tool executions\n       * Get responses from Claude\n\n    ## How it works\n\n    Here's a high-level workflow schema:\n\n    ```mermaid\n    ---\n    config:\n        theme: neutral\n    ---\n    sequenceDiagram\n        actor User\n        participant Client\n        participant Claude\n        participant MCP_Server as MCP Server\n        participant Tools\n\n        User->>Client: Send query\n        Client<<->>MCP_Server: Get available tools\n        Client->>Claude: Send query with tool descriptions\n        Claude-->>Client: Decide tool execution\n        Client->>MCP_Server: Request tool execution\n        MCP_Server->>Tools: Execute chosen tools\n        Tools-->>MCP_Server: Return results\n        MCP_Server-->>Client: Send results\n        Client->>Claude: Send tool results\n        Claude-->>Client: Provide final response\n        Client-->>User: Display response\n    ```\n\n    When you submit a query:\n\n    1. The client gets the list of available tools from the server\n    2. Your query is sent to Claude along with tool descriptions\n    3. Claude decides which tools (if any) to use\n    4. The client executes any requested tool calls through the server\n    5. Results are sent back to Claude\n    6. Claude provides a natural language response\n    7. The response is displayed to you\n\n    ## Best practices\n\n    1. **Error Handling**\n\n       * Leverage Kotlin's type system to model errors explicitly\n       * Wrap external tool and API calls in `try-catch` blocks when exceptions are possible\n       * Provide clear and meaningful error messages\n       * Handle network timeouts and connection issues gracefully\n\n    2. 
**Security**\n       * Store API keys and secrets securely in `local.properties`, environment variables, or secret managers\n       * Validate all external responses to avoid unexpected or unsafe data usage\n       * Be cautious with permissions and trust boundaries when using tools\n\n    ## Troubleshooting\n\n    ### Server Path Issues\n\n    * Double-check the path to your server script is correct\n    * Use the absolute path if the relative path isn't working\n    * For Windows users, make sure to use forward slashes (/) or escaped backslashes (\\\\) in the path\n    * Make sure that the required runtime is installed (java for Java, npm for Node.js, or uv for Python)\n    * Verify the server file has the correct extension (.jar for Java, .js for Node.js or .py for Python)\n\n    Example of correct path usage:\n\n    ```bash\n    # Relative path\n    java -jar build/libs/client.jar ./server/build/libs/server.jar\n\n    # Absolute path\n    java -jar build/libs/client.jar /Users/username/projects/mcp-server/build/libs/server.jar\n\n    # Windows path (either format works)\n    java -jar build/libs/client.jar C:/projects/mcp-server/build/libs/server.jar\n    java -jar build/libs/client.jar C:\\\\projects\\\\mcp-server\\\\build\\\\libs\\\\server.jar\n    ```\n\n    ### Response Timing\n\n    * The first response might take up to 30 seconds to return\n    * This is normal and happens while:\n      * The server initializes\n      * Claude processes the query\n      * Tools are being executed\n    * Subsequent responses are typically faster\n    * Don't interrupt the process during this initial waiting period\n\n    ### Common Error Messages\n\n    If you see:\n\n    * `Connection refused`: Ensure the server is running and the path is correct\n    * `Tool execution failed`: Verify the tool's required environment variables are set\n    * `ANTHROPIC_API_KEY is not set`: Check your environment variables\n  </Tab>\n\n  <Tab title=\"C#\">\n    [You can find the complete 
code for this tutorial here.](https://github.com/modelcontextprotocol/csharp-sdk/tree/main/samples/QuickstartClient)\n\n    ## System Requirements\n\n    Before starting, ensure your system meets these requirements:\n\n    * .NET 8.0 or higher\n    * Anthropic API key (Claude)\n    * Windows, Linux, or MacOS\n\n    ## Setting up your environment\n\n    First, create a new .NET project:\n\n    ```bash\n    dotnet new console -n QuickstartClient\n    cd QuickstartClient\n    ```\n\n    Then, add the required dependencies to your project:\n\n    ```bash\n    dotnet add package ModelContextProtocol --prerelease\n    dotnet add package Anthropic.SDK\n    dotnet add package Microsoft.Extensions.Hosting\n    dotnet add package Microsoft.Extensions.AI\n    ```\n\n    ## Setting up your API key\n\n    You'll need an Anthropic API key from the [Anthropic Console](https://console.anthropic.com/settings/keys).\n\n    ```bash\n    dotnet user-secrets init\n    dotnet user-secrets set \"ANTHROPIC_API_KEY\" \"<your key here>\"\n    ```\n\n    ## Creating the Client\n\n    ### Basic Client Structure\n\n    First, let's setup the basic client class in the file `Program.cs`:\n\n    ```csharp\n    using Anthropic.SDK;\n    using Microsoft.Extensions.AI;\n    using Microsoft.Extensions.Configuration;\n    using Microsoft.Extensions.Hosting;\n    using ModelContextProtocol.Client;\n    using ModelContextProtocol.Protocol.Transport;\n\n    var builder = Host.CreateApplicationBuilder(args);\n\n    builder.Configuration\n        .AddEnvironmentVariables()\n        .AddUserSecrets<Program>();\n    ```\n\n    This creates the beginnings of a .NET console application that can read the API key from user secrets.\n\n    Next, we'll setup the MCP Client:\n\n    ```csharp\n    var (command, arguments) = GetCommandAndArguments(args);\n\n    var clientTransport = new StdioClientTransport(new()\n    {\n        Name = \"Demo Server\",\n        Command = command,\n        Arguments = arguments,\n    
});\n\n    await using var mcpClient = await McpClientFactory.CreateAsync(clientTransport);\n\n    var tools = await mcpClient.ListToolsAsync();\n    foreach (var tool in tools)\n    {\n        Console.WriteLine($\"Connected to server with tools: {tool.Name}\");\n    }\n    ```\n\n    Add this function at the end of the `Program.cs` file:\n\n    ```csharp\n    static (string command, string[] arguments) GetCommandAndArguments(string[] args)\n    {\n        return args switch\n        {\n            [var script] when script.EndsWith(\".py\") => (\"python\", args),\n            [var script] when script.EndsWith(\".js\") => (\"node\", args),\n            [var script] when Directory.Exists(script) || (File.Exists(script) && script.EndsWith(\".csproj\")) => (\"dotnet\", [\"run\", \"--project\", script, \"--no-build\"]),\n            _ => throw new NotSupportedException(\"An unsupported server script was provided. Supported scripts are .py, .js, or .csproj\")\n        };\n    }\n    ```\n\n    This creates a MCP client that will connect to a server that is provided as a command line argument. It then lists the available tools from the connected server.\n\n    ### Query processing logic\n\n    Now let's add the core functionality for processing queries and handling tool calls:\n\n    ```csharp\n    using var anthropicClient = new AnthropicClient(new APIAuthentication(builder.Configuration[\"ANTHROPIC_API_KEY\"]))\n        .Messages\n        .AsBuilder()\n        .UseFunctionInvocation()\n        .Build();\n\n    var options = new ChatOptions\n    {\n        MaxOutputTokens = 1000,\n        ModelId = \"claude-3-5-sonnet-20241022\",\n        Tools = [.. 
tools]\n    };\n\n    Console.ForegroundColor = ConsoleColor.Green;\n    Console.WriteLine(\"MCP Client Started!\");\n    Console.ResetColor();\n\n    PromptForInput();\n    while(Console.ReadLine() is string query && !\"exit\".Equals(query, StringComparison.OrdinalIgnoreCase))\n    {\n        if (string.IsNullOrWhiteSpace(query))\n        {\n            PromptForInput();\n            continue;\n        }\n\n        await foreach (var message in anthropicClient.GetStreamingResponseAsync(query, options))\n        {\n            Console.Write(message);\n        }\n        Console.WriteLine();\n\n        PromptForInput();\n    }\n\n    static void PromptForInput()\n    {\n        Console.WriteLine(\"Enter a command (or 'exit' to quit):\");\n        Console.ForegroundColor = ConsoleColor.Cyan;\n        Console.Write(\"> \");\n        Console.ResetColor();\n    }\n    ```\n\n    ## Key Components Explained\n\n    ### 1. Client Initialization\n\n    * The client is initialized using `McpClientFactory.CreateAsync()`, which sets up the transport type and command to run the server.\n\n    ### 2. Server Connection\n\n    * Supports Python, Node.js, and .NET servers.\n    * The server is started using the command specified in the arguments.\n    * Configures to use stdio for communication with the server.\n    * Initializes the session and available tools.\n\n    ### 3. 
Query Processing\n\n    * Leverages [Microsoft.Extensions.AI](https://learn.microsoft.com/dotnet/ai/ai-extensions) for the chat client.\n    * Configures the `IChatClient` to use automatic tool (function) invocation.\n    * The client reads user input and sends it to the server.\n    * The server processes the query and returns a response.\n    * The response is displayed to the user.\n\n    ## Running the Client\n\n    To run your client with any MCP server:\n\n    ```bash\n    dotnet run -- path/to/server.csproj # dotnet server\n    dotnet run -- path/to/server.py # python server\n    dotnet run -- path/to/server.js # node server\n    ```\n\n    <Note>\n      If you're continuing the weather tutorial from the server quickstart, your command might look something like this: `dotnet run -- path/to/QuickstartWeatherServer`.\n    </Note>\n\n    The client will:\n\n    1. Connect to the specified server\n    2. List available tools\n    3. Start an interactive chat session where you can:\n       * Enter queries\n       * See tool executions\n       * Get responses from Claude\n    4. 
Exit the session when done\n\n    Here's an example of what it should look like it connected to a weather server quickstart:\n\n    <Frame>\n      <img src=\"https://mintlify.s3.us-west-1.amazonaws.com/mcp/images/quickstart-dotnet-client.png\" />\n    </Frame>\n  </Tab>\n</Tabs>\n\n## Next steps\n\n<CardGroup cols={2}>\n  <Card title=\"Example servers\" icon=\"grid\" href=\"/examples\">\n    Check out our gallery of official MCP servers and implementations\n  </Card>\n\n  <Card title=\"Clients\" icon=\"cubes\" href=\"/clients\">\n    View the list of clients that support MCP integrations\n  </Card>\n\n  <Card title=\"Building MCP with LLMs\" icon=\"comments\" href=\"/tutorials/building-mcp-with-llms\">\n    Learn how to use LLMs like Claude to speed up your MCP development\n  </Card>\n\n  <Card title=\"Core architecture\" icon=\"sitemap\" href=\"/docs/concepts/architecture\">\n    Understand how MCP connects clients, servers, and LLMs\n  </Card>\n</CardGroup>\n\n\n# For Server Developers\nSource: https://modelcontextprotocol.io/quickstart/server\n\nGet started building your own server to use in Claude for Desktop and other clients.\n\nIn this tutorial, we'll build a simple MCP weather server and connect it to a host, Claude for Desktop. We'll start with a basic setup, and then progress to more complex use cases.\n\n### What we'll be building\n\nMany LLMs do not currently have the ability to fetch the forecast and severe weather alerts. Let's use MCP to solve that!\n\nWe'll build a server that exposes two tools: `get-alerts` and `get-forecast`. Then we'll connect the server to an MCP host (in this case, Claude for Desktop):\n\n<Frame>\n  <img src=\"https://mintlify.s3.us-west-1.amazonaws.com/mcp/images/weather-alerts.png\" />\n</Frame>\n\n<Frame>\n  <img src=\"https://mintlify.s3.us-west-1.amazonaws.com/mcp/images/current-weather.png\" />\n</Frame>\n\n<Note>\n  Servers can connect to any client. 
We've chosen Claude for Desktop here for simplicity, but we also have guides on [building your own client](/quickstart/client) as well as a [list of other clients here](/clients).\n</Note>\n\n### Core MCP Concepts\n\nMCP servers can provide three main types of capabilities:\n\n1. **Resources**: File-like data that can be read by clients (like API responses or file contents)\n2. **Tools**: Functions that can be called by the LLM (with user approval)\n3. **Prompts**: Pre-written templates that help users accomplish specific tasks\n\nThis tutorial will primarily focus on tools.\n\n<Tabs>\n  <Tab title=\"Python\">\n    Let's get started with building our weather server! [You can find the complete code for what we'll be building here.](https://github.com/modelcontextprotocol/quickstart-resources/tree/main/weather-server-python)\n\n    ### Prerequisite knowledge\n\n    This quickstart assumes you have familiarity with:\n\n    * Python\n    * LLMs like Claude\n\n    ### System requirements\n\n    * Python 3.10 or higher installed.\n    * You must use the Python MCP SDK 1.2.0 or higher.\n\n    ### Set up your environment\n\n    First, let's install `uv` and set up our Python project and environment:\n\n    <CodeGroup>\n      ```bash MacOS/Linux\n      curl -LsSf https://astral.sh/uv/install.sh | sh\n      ```\n\n      ```powershell Windows\n      powershell -ExecutionPolicy ByPass -c \"irm https://astral.sh/uv/install.ps1 | iex\"\n      ```\n    </CodeGroup>\n\n    Make sure to restart your terminal afterwards to ensure that the `uv` command gets picked up.\n\n    Now, let's create and set up our project:\n\n    <CodeGroup>\n      ```bash MacOS/Linux\n      # Create a new directory for our project\n      uv init weather\n      cd weather\n\n      # Create virtual environment and activate it\n      uv venv\n      source .venv/bin/activate\n\n      # Install dependencies\n      uv add \"mcp[cli]\" httpx\n\n      # Create our server file\n      touch weather.py\n      ```\n\n 
     ```powershell Windows\n      # Create a new directory for our project\n      uv init weather\n      cd weather\n\n      # Create virtual environment and activate it\n      uv venv\n      .venv\\Scripts\\activate\n\n      # Install dependencies\n      uv add mcp[cli] httpx\n\n      # Create our server file\n      new-item weather.py\n      ```\n    </CodeGroup>\n\n    Now let's dive into building your server.\n\n    ## Building your server\n\n    ### Importing packages and setting up the instance\n\n    Add these to the top of your `weather.py`:\n\n    ```python\n    from typing import Any\n    import httpx\n    from mcp.server.fastmcp import FastMCP\n\n    # Initialize FastMCP server\n    mcp = FastMCP(\"weather\")\n\n    # Constants\n    NWS_API_BASE = \"https://api.weather.gov\"\n    USER_AGENT = \"weather-app/1.0\"\n    ```\n\n    The FastMCP class uses Python type hints and docstrings to automatically generate tool definitions, making it easy to create and maintain MCP tools.\n\n    ### Helper functions\n\n    Next, let's add our helper functions for querying and formatting the data from the National Weather Service API:\n\n    ```python\n    async def make_nws_request(url: str) -> dict[str, Any] | None:\n        \"\"\"Make a request to the NWS API with proper error handling.\"\"\"\n        headers = {\n            \"User-Agent\": USER_AGENT,\n            \"Accept\": \"application/geo+json\"\n        }\n        async with httpx.AsyncClient() as client:\n            try:\n                response = await client.get(url, headers=headers, timeout=30.0)\n                response.raise_for_status()\n                return response.json()\n            except Exception:\n                return None\n\n    def format_alert(feature: dict) -> str:\n        \"\"\"Format an alert feature into a readable string.\"\"\"\n        props = feature[\"properties\"]\n        return f\"\"\"\n    Event: {props.get('event', 'Unknown')}\n    Area: {props.get('areaDesc', 
'Unknown')}\n    Severity: {props.get('severity', 'Unknown')}\n    Description: {props.get('description', 'No description available')}\n    Instructions: {props.get('instruction', 'No specific instructions provided')}\n    \"\"\"\n    ```\n\n    ### Implementing tool execution\n\n    The tool execution handler is responsible for actually executing the logic of each tool. Let's add it:\n\n    ```python\n    @mcp.tool()\n    async def get_alerts(state: str) -> str:\n        \"\"\"Get weather alerts for a US state.\n\n        Args:\n            state: Two-letter US state code (e.g. CA, NY)\n        \"\"\"\n        url = f\"{NWS_API_BASE}/alerts/active/area/{state}\"\n        data = await make_nws_request(url)\n\n        if not data or \"features\" not in data:\n            return \"Unable to fetch alerts or no alerts found.\"\n\n        if not data[\"features\"]:\n            return \"No active alerts for this state.\"\n\n        alerts = [format_alert(feature) for feature in data[\"features\"]]\n        return \"\\n---\\n\".join(alerts)\n\n    @mcp.tool()\n    async def get_forecast(latitude: float, longitude: float) -> str:\n        \"\"\"Get weather forecast for a location.\n\n        Args:\n            latitude: Latitude of the location\n            longitude: Longitude of the location\n        \"\"\"\n        # First get the forecast grid endpoint\n        points_url = f\"{NWS_API_BASE}/points/{latitude},{longitude}\"\n        points_data = await make_nws_request(points_url)\n\n        if not points_data:\n            return \"Unable to fetch forecast data for this location.\"\n\n        # Get the forecast URL from the points response\n        forecast_url = points_data[\"properties\"][\"forecast\"]\n        forecast_data = await make_nws_request(forecast_url)\n\n        if not forecast_data:\n            return \"Unable to fetch detailed forecast.\"\n\n        # Format the periods into a readable forecast\n        periods = 
forecast_data[\"properties\"][\"periods\"]\n        forecasts = []\n        for period in periods[:5]:  # Only show next 5 periods\n            forecast = f\"\"\"\n    {period['name']}:\n    Temperature: {period['temperature']}°{period['temperatureUnit']}\n    Wind: {period['windSpeed']} {period['windDirection']}\n    Forecast: {period['detailedForecast']}\n    \"\"\"\n            forecasts.append(forecast)\n\n        return \"\\n---\\n\".join(forecasts)\n    ```\n\n    ### Running the server\n\n    Finally, let's initialize and run the server:\n\n    ```python\n    if __name__ == \"__main__\":\n        # Initialize and run the server\n        mcp.run(transport='stdio')\n    ```\n\n    Your server is complete! Run `uv run weather.py` to confirm that everything's working.\n\n    Let's now test your server from an existing MCP host, Claude for Desktop.\n\n    ## Testing your server with Claude for Desktop\n\n    <Note>\n      Claude for Desktop is not yet available on Linux. Linux users can proceed to the [Building a client](/quickstart/client) tutorial to build an MCP client that connects to the server we just built.\n    </Note>\n\n    First, make sure you have Claude for Desktop installed. [You can install the latest version\n    here.](https://claude.ai/download) If you already have Claude for Desktop, **make sure it's updated to the latest version.**\n\n    We'll need to configure Claude for Desktop for whichever MCP servers you want to use. To do this, open your Claude for Desktop App configuration at `~/Library/Application Support/Claude/claude_desktop_config.json` in a text editor. 
Make sure to create the file if it doesn't exist.\n\n    For example, if you have [VS Code](https://code.visualstudio.com/) installed:\n\n    <Tabs>\n      <Tab title=\"MacOS/Linux\">\n        ```bash\n        code ~/Library/Application\\ Support/Claude/claude_desktop_config.json\n        ```\n      </Tab>\n\n      <Tab title=\"Windows\">\n        ```powershell\n        code $env:AppData\\Claude\\claude_desktop_config.json\n        ```\n      </Tab>\n    </Tabs>\n\n    You'll then add your servers in the `mcpServers` key. The MCP UI elements will only show up in Claude for Desktop if at least one server is properly configured.\n\n    In this case, we'll add our single weather server like so:\n\n    <Tabs>\n      <Tab title=\"MacOS/Linux\">\n        ```json Python\n        {\n          \"mcpServers\": {\n            \"weather\": {\n              \"command\": \"uv\",\n              \"args\": [\n                \"--directory\",\n                \"/ABSOLUTE/PATH/TO/PARENT/FOLDER/weather\",\n                \"run\",\n                \"weather.py\"\n              ]\n            }\n          }\n        }\n        ```\n      </Tab>\n\n      <Tab title=\"Windows\">\n        ```json Python\n        {\n          \"mcpServers\": {\n            \"weather\": {\n              \"command\": \"uv\",\n              \"args\": [\n                \"--directory\",\n                \"C:\\\\ABSOLUTE\\\\PATH\\\\TO\\\\PARENT\\\\FOLDER\\\\weather\",\n                \"run\",\n                \"weather.py\"\n              ]\n            }\n          }\n        }\n        ```\n      </Tab>\n    </Tabs>\n\n    <Warning>\n      You may need to put the full path to the `uv` executable in the `command` field. You can get this by running `which uv` on MacOS/Linux or `where uv` on Windows.\n    </Warning>\n\n    <Note>\n      Make sure you pass in the absolute path to your server.\n    </Note>\n\n    This tells Claude for Desktop:\n\n    1. There's an MCP server named \"weather\"\n    2. 
Launch it by running `uv --directory /ABSOLUTE/PATH/TO/PARENT/FOLDER/weather run weather.py`
{\n        \"weather\": \"./build/index.js\"\n      },\n      \"scripts\": {\n        \"build\": \"tsc && chmod 755 build/index.js\"\n      },\n      \"files\": [\"build\"]\n    }\n    ```\n\n    Create a `tsconfig.json` in the root of your project:\n\n    ```json tsconfig.json\n    {\n      \"compilerOptions\": {\n        \"target\": \"ES2022\",\n        \"module\": \"Node16\",\n        \"moduleResolution\": \"Node16\",\n        \"outDir\": \"./build\",\n        \"rootDir\": \"./src\",\n        \"strict\": true,\n        \"esModuleInterop\": true,\n        \"skipLibCheck\": true,\n        \"forceConsistentCasingInFileNames\": true\n      },\n      \"include\": [\"src/**/*\"],\n      \"exclude\": [\"node_modules\"]\n    }\n    ```\n\n    Now let's dive into building your server.\n\n    ## Building your server\n\n    ### Importing packages and setting up the instance\n\n    Add these to the top of your `src/index.ts`:\n\n    ```typescript\n    import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\n    import { StdioServerTransport } from \"@modelcontextprotocol/sdk/server/stdio.js\";\n    import { z } from \"zod\";\n\n    const NWS_API_BASE = \"https://api.weather.gov\";\n    const USER_AGENT = \"weather-app/1.0\";\n\n    // Create server instance\n    const server = new McpServer({\n      name: \"weather\",\n      version: \"1.0.0\",\n      capabilities: {\n        resources: {},\n        tools: {},\n      },\n    });\n    ```\n\n    ### Helper functions\n\n    Next, let's add our helper functions for querying and formatting the data from the National Weather Service API:\n\n    ```typescript\n    // Helper function for making NWS API requests\n    async function makeNWSRequest<T>(url: string): Promise<T | null> {\n      const headers = {\n        \"User-Agent\": USER_AGENT,\n        Accept: \"application/geo+json\",\n      };\n\n      try {\n        const response = await fetch(url, { headers });\n        if (!response.ok) {\n          throw new 
Error(`HTTP error! status: ${response.status}`);\n        }\n        return (await response.json()) as T;\n      } catch (error) {\n        console.error(\"Error making NWS request:\", error);\n        return null;\n      }\n    }\n\n    interface AlertFeature {\n      properties: {\n        event?: string;\n        areaDesc?: string;\n        severity?: string;\n        status?: string;\n        headline?: string;\n      };\n    }\n\n    // Format alert data\n    function formatAlert(feature: AlertFeature): string {\n      const props = feature.properties;\n      return [\n        `Event: ${props.event || \"Unknown\"}`,\n        `Area: ${props.areaDesc || \"Unknown\"}`,\n        `Severity: ${props.severity || \"Unknown\"}`,\n        `Status: ${props.status || \"Unknown\"}`,\n        `Headline: ${props.headline || \"No headline\"}`,\n        \"---\",\n      ].join(\"\\n\");\n    }\n\n    interface ForecastPeriod {\n      name?: string;\n      temperature?: number;\n      temperatureUnit?: string;\n      windSpeed?: string;\n      windDirection?: string;\n      shortForecast?: string;\n    }\n\n    interface AlertsResponse {\n      features: AlertFeature[];\n    }\n\n    interface PointsResponse {\n      properties: {\n        forecast?: string;\n      };\n    }\n\n    interface ForecastResponse {\n      properties: {\n        periods: ForecastPeriod[];\n      };\n    }\n    ```\n\n    ### Implementing tool execution\n\n    The tool execution handler is responsible for actually executing the logic of each tool. Let's add it:\n\n    ```typescript\n    // Register weather tools\n    server.tool(\n      \"get-alerts\",\n      \"Get weather alerts for a state\",\n      {\n        state: z.string().length(2).describe(\"Two-letter state code (e.g. 
CA, NY)\"),\n      },\n      async ({ state }) => {\n        const stateCode = state.toUpperCase();\n        const alertsUrl = `${NWS_API_BASE}/alerts?area=${stateCode}`;\n        const alertsData = await makeNWSRequest<AlertsResponse>(alertsUrl);\n\n        if (!alertsData) {\n          return {\n            content: [\n              {\n                type: \"text\",\n                text: \"Failed to retrieve alerts data\",\n              },\n            ],\n          };\n        }\n\n        const features = alertsData.features || [];\n        if (features.length === 0) {\n          return {\n            content: [\n              {\n                type: \"text\",\n                text: `No active alerts for ${stateCode}`,\n              },\n            ],\n          };\n        }\n\n        const formattedAlerts = features.map(formatAlert);\n        const alertsText = `Active alerts for ${stateCode}:\\n\\n${formattedAlerts.join(\"\\n\")}`;\n\n        return {\n          content: [\n            {\n              type: \"text\",\n              text: alertsText,\n            },\n          ],\n        };\n      },\n    );\n\n    server.tool(\n      \"get-forecast\",\n      \"Get weather forecast for a location\",\n      {\n        latitude: z.number().min(-90).max(90).describe(\"Latitude of the location\"),\n        longitude: z\n          .number()\n          .min(-180)\n          .max(180)\n          .describe(\"Longitude of the location\"),\n      },\n      async ({ latitude, longitude }) => {\n        // Get grid point data\n        const pointsUrl = `${NWS_API_BASE}/points/${latitude.toFixed(4)},${longitude.toFixed(4)}`;\n        const pointsData = await makeNWSRequest<PointsResponse>(pointsUrl);\n\n        if (!pointsData) {\n          return {\n            content: [\n              {\n                type: \"text\",\n                text: `Failed to retrieve grid point data for coordinates: ${latitude}, ${longitude}. 
This location may not be supported by the NWS API (only US locations are supported).`,\n              },\n            ],\n          };\n        }\n\n        const forecastUrl = pointsData.properties?.forecast;\n        if (!forecastUrl) {\n          return {\n            content: [\n              {\n                type: \"text\",\n                text: \"Failed to get forecast URL from grid point data\",\n              },\n            ],\n          };\n        }\n\n        // Get forecast data\n        const forecastData = await makeNWSRequest<ForecastResponse>(forecastUrl);\n        if (!forecastData) {\n          return {\n            content: [\n              {\n                type: \"text\",\n                text: \"Failed to retrieve forecast data\",\n              },\n            ],\n          };\n        }\n\n        const periods = forecastData.properties?.periods || [];\n        if (periods.length === 0) {\n          return {\n            content: [\n              {\n                type: \"text\",\n                text: \"No forecast periods available\",\n              },\n            ],\n          };\n        }\n\n        // Format forecast periods\n        const formattedForecast = periods.map((period: ForecastPeriod) =>\n          [\n            `${period.name || \"Unknown\"}:`,\n            `Temperature: ${period.temperature || \"Unknown\"}°${period.temperatureUnit || \"F\"}`,\n            `Wind: ${period.windSpeed || \"Unknown\"} ${period.windDirection || \"\"}`,\n            `${period.shortForecast || \"No forecast available\"}`,\n            \"---\",\n          ].join(\"\\n\"),\n        );\n\n        const forecastText = `Forecast for ${latitude}, ${longitude}:\\n\\n${formattedForecast.join(\"\\n\")}`;\n\n        return {\n          content: [\n            {\n              type: \"text\",\n              text: forecastText,\n            },\n          ],\n        };\n      },\n    );\n    ```\n\n    ### Running the server\n\n    Finally, implement 
the main function to run the server:\n\n    ```typescript\n    async function main() {\n      const transport = new StdioServerTransport();\n      await server.connect(transport);\n      console.error(\"Weather MCP Server running on stdio\");\n    }\n\n    main().catch((error) => {\n      console.error(\"Fatal error in main():\", error);\n      process.exit(1);\n    });\n    ```\n\n    Make sure to run `npm run build` to build your server! This is a very important step in getting your server to connect.\n\n    Let's now test your server from an existing MCP host, Claude for Desktop.\n\n    ## Testing your server with Claude for Desktop\n\n    <Note>\n      Claude for Desktop is not yet available on Linux. Linux users can proceed to the [Building a client](/quickstart/client) tutorial to build an MCP client that connects to the server we just built.\n    </Note>\n\n    First, make sure you have Claude for Desktop installed. [You can install the latest version\n    here.](https://claude.ai/download) If you already have Claude for Desktop, **make sure it's updated to the latest version.**\n\n    We'll need to configure Claude for Desktop for whichever MCP servers you want to use. To do this, open your Claude for Desktop App configuration at `~/Library/Application Support/Claude/claude_desktop_config.json` in a text editor. Make sure to create the file if it doesn't exist.\n\n    For example, if you have [VS Code](https://code.visualstudio.com/) installed:\n\n    <Tabs>\n      <Tab title=\"MacOS/Linux\">\n        ```bash\n        code ~/Library/Application\\ Support/Claude/claude_desktop_config.json\n        ```\n      </Tab>\n\n      <Tab title=\"Windows\">\n        ```powershell\n        code $env:AppData\\Claude\\claude_desktop_config.json\n        ```\n      </Tab>\n    </Tabs>\n\n    You'll then add your servers in the `mcpServers` key. 
The MCP UI elements will only show up in Claude for Desktop if at least one server is properly configured.\n\n    In this case, we'll add our single weather server like so:\n\n    <Tabs>\n      <Tab title=\"MacOS/Linux\">\n        <CodeGroup>\n          ```json Node\n          {\n            \"mcpServers\": {\n              \"weather\": {\n                \"command\": \"node\",\n                \"args\": [\"/ABSOLUTE/PATH/TO/PARENT/FOLDER/weather/build/index.js\"]\n              }\n            }\n          }\n          ```\n        </CodeGroup>\n      </Tab>\n\n      <Tab title=\"Windows\">\n        <CodeGroup>\n          ```json Node\n          {\n            \"mcpServers\": {\n              \"weather\": {\n                \"command\": \"node\",\n                \"args\": [\"C:\\\\PATH\\\\TO\\\\PARENT\\\\FOLDER\\\\weather\\\\build\\\\index.js\"]\n              }\n            }\n          }\n          ```\n        </CodeGroup>\n      </Tab>\n    </Tabs>\n\n    This tells Claude for Desktop:\n\n    1. There's an MCP server named \"weather\"\n    2. 
Launch it by running `node /ABSOLUTE/PATH/TO/PARENT/FOLDER/weather/build/index.js`\n\n    Save the file, and restart **Claude for Desktop**.\n  </Tab>\n\n  <Tab title=\"Java\">\n    <Note>\n      This is a quickstart demo based on Spring AI MCP auto-configuration and boot starters.\n      To learn how to create sync and async MCP Servers, manually, consult the [Java SDK Server](/sdk/java/mcp-server) documentation.\n    </Note>\n\n    Let's get started with building our weather server!\n    [You can find the complete code for what we'll be building here.](https://github.com/spring-projects/spring-ai-examples/tree/main/model-context-protocol/weather/starter-stdio-server)\n\n    For more information, see the [MCP Server Boot Starter](https://docs.spring.io/spring-ai/reference/api/mcp/mcp-server-boot-starter-docs.html) reference documentation.\n    For manual MCP Server implementation, refer to the [MCP Server Java SDK documentation](/sdk/java/mcp-server).\n\n    ### System requirements\n\n    * Java 17 or higher installed.\n    * [Spring Boot 3.3.x](https://docs.spring.io/spring-boot/installing.html) or higher\n\n    ### Set up your environment\n\n    Use the [Spring Initializer](https://start.spring.io/) to bootstrap the project.\n\n    You will need to add the following dependencies:\n\n    <Tabs>\n      <Tab title=\"Maven\">\n        ```xml\n        <dependencies>\n              <dependency>\n                  <groupId>org.springframework.ai</groupId>\n                  <artifactId>spring-ai-starter-mcp-server</artifactId>\n              </dependency>\n\n              <dependency>\n                  <groupId>org.springframework</groupId>\n                  <artifactId>spring-web</artifactId>\n              </dependency>\n        </dependencies>\n        ```\n      </Tab>\n\n      <Tab title=\"Gradle\">\n        ```groovy\n        dependencies {\n          implementation platform(\"org.springframework.ai:spring-ai-starter-mcp-server\")\n          implementation 
platform(\"org.springframework:spring-web\")\n        }\n        ```\n      </Tab>\n    </Tabs>\n\n    Then configure your application by setting the application properties:\n\n    <CodeGroup>\n      ```bash application.properties\n      spring.main.bannerMode=off\n      logging.pattern.console=\n      ```\n\n      ```yaml application.yml\n      logging:\n        pattern:\n          console:\n      spring:\n        main:\n          banner-mode: off\n      ```\n    </CodeGroup>\n\n    The [Server Configuration Properties](https://docs.spring.io/spring-ai/reference/api/mcp/mcp-server-boot-starter-docs.html#_configuration_properties) documents all available properties.\n\n    Now let's dive into building your server.\n\n    ## Building your server\n\n    ### Weather Service\n\n    Let's implement a [WeatherService.java](https://github.com/spring-projects/spring-ai-examples/blob/main/model-context-protocol/weather/starter-stdio-server/src/main/java/org/springframework/ai/mcp/sample/server/WeatherService.java) that uses a REST client to query the data from the National Weather Service API:\n\n    ```java\n    @Service\n    public class WeatherService {\n\n    \tprivate final RestClient restClient;\n\n    \tpublic WeatherService() {\n    \t\tthis.restClient = RestClient.builder()\n    \t\t\t.baseUrl(\"https://api.weather.gov\")\n    \t\t\t.defaultHeader(\"Accept\", \"application/geo+json\")\n    \t\t\t.defaultHeader(\"User-Agent\", \"WeatherApiClient/1.0 (your@email.com)\")\n    \t\t\t.build();\n    \t}\n\n      @Tool(description = \"Get weather forecast for a specific latitude/longitude\")\n      public String getWeatherForecastByLocation(\n          double latitude,   // Latitude coordinate\n          double longitude   // Longitude coordinate\n      ) {\n          // Returns detailed forecast including:\n          // - Temperature and unit\n          // - Wind speed and direction\n          // - Detailed forecast description\n      }\n\n      @Tool(description = \"Get 
weather alerts for a US state\")\n      public String getAlerts(\n          @ToolParam(description = \"Two-letter US state code (e.g. CA, NY)\") String state\n      ) {\n          // Returns active alerts including:\n          // - Event type\n          // - Affected area\n          // - Severity\n          // - Description\n          // - Safety instructions\n      }\n\n      // ......\n    }\n    ```\n\n    The `@Service` annotation will auto-register the service in your application context.\n    The Spring AI `@Tool` annotation makes it easy to create and maintain MCP tools.\n\n    The auto-configuration will automatically register these tools with the MCP server.\n\n    ### Create your Boot Application\n\n    ```java\n    @SpringBootApplication\n    public class McpServerApplication {\n\n    \tpublic static void main(String[] args) {\n    \t\tSpringApplication.run(McpServerApplication.class, args);\n    \t}\n\n    \t@Bean\n    \tpublic ToolCallbackProvider weatherTools(WeatherService weatherService) {\n    \t\treturn  MethodToolCallbackProvider.builder().toolObjects(weatherService).build();\n    \t}\n    }\n    ```\n\n    Uses the `MethodToolCallbackProvider` utils to convert the `@Tool`-annotated methods into actionable callbacks used by the MCP server.\n\n    ### Running the server\n\n    Finally, let's build the server:\n\n    ```bash\n    ./mvnw clean install\n    ```\n\n    This will generate a `mcp-weather-stdio-server-0.0.1-SNAPSHOT.jar` file within the `target` folder.\n\n    Let's now test your server from an existing MCP host, Claude for Desktop.\n\n    ## Testing your server with Claude for Desktop\n\n    <Note>\n      Claude for Desktop is not yet available on Linux.\n    </Note>\n\n    First, make sure you have Claude for Desktop installed.\n    [You can install the latest version here.](https://claude.ai/download) If you already have Claude for Desktop, **make sure it's updated to the latest version.**\n\n    We'll need to configure Claude for Desktop for 
whichever MCP servers you want to use.\n    To do this, open your Claude for Desktop App configuration at `~/Library/Application Support/Claude/claude_desktop_config.json` in a text editor.\n    Make sure to create the file if it doesn't exist.\n\n    For example, if you have [VS Code](https://code.visualstudio.com/) installed:\n\n    <Tabs>\n      <Tab title=\"MacOS/Linux\">\n        ```bash\n        code ~/Library/Application\\ Support/Claude/claude_desktop_config.json\n        ```\n      </Tab>\n\n      <Tab title=\"Windows\">\n        ```powershell\n        code $env:AppData\\Claude\\claude_desktop_config.json\n        ```\n      </Tab>\n    </Tabs>\n\n    You'll then add your servers in the `mcpServers` key.\n    The MCP UI elements will only show up in Claude for Desktop if at least one server is properly configured.\n\n    In this case, we'll add our single weather server like so:\n\n    <Tabs>\n      <Tab title=\"MacOS/Linux\">\n        ```json java\n        {\n          \"mcpServers\": {\n            \"spring-ai-mcp-weather\": {\n              \"command\": \"java\",\n              \"args\": [\n                \"-Dspring.ai.mcp.server.stdio=true\",\n                \"-jar\",\n                \"/ABSOLUTE/PATH/TO/PARENT/FOLDER/mcp-weather-stdio-server-0.0.1-SNAPSHOT.jar\"\n              ]\n            }\n          }\n        }\n        ```\n      </Tab>\n\n      <Tab title=\"Windows\">\n        ```json java\n        {\n          \"mcpServers\": {\n            \"spring-ai-mcp-weather\": {\n              \"command\": \"java\",\n              \"args\": [\n                \"-Dspring.ai.mcp.server.transport=STDIO\",\n                \"-jar\",\n                \"C:\\\\ABSOLUTE\\\\PATH\\\\TO\\\\PARENT\\\\FOLDER\\\\weather\\\\mcp-weather-stdio-server-0.0.1-SNAPSHOT.jar\"\n              ]\n            }\n          }\n        }\n        ```\n      </Tab>\n    </Tabs>\n\n    <Note>\n      Make sure you pass in the absolute path to your server.\n    </Note>\n\n    This 
tells Claude for Desktop:\n\n    1. There's an MCP server named \"my-weather-server\"\n    2. To launch it by running `java -jar /ABSOLUTE/PATH/TO/PARENT/FOLDER/mcp-weather-stdio-server-0.0.1-SNAPSHOT.jar`\n\n    Save the file, and restart **Claude for Desktop**.\n\n    ## Testing your server with Java client\n\n    ### Create an MCP Client manually\n\n    Use the `McpClient` to connect to the server:\n\n    ```java\n    var stdioParams = ServerParameters.builder(\"java\")\n      .args(\"-jar\", \"/ABSOLUTE/PATH/TO/PARENT/FOLDER/mcp-weather-stdio-server-0.0.1-SNAPSHOT.jar\")\n      .build();\n\n    var stdioTransport = new StdioClientTransport(stdioParams);\n\n    var mcpClient = McpClient.sync(stdioTransport).build();\n\n    mcpClient.initialize();\n\n    ListToolsResult toolsList = mcpClient.listTools();\n\n    CallToolResult weather = mcpClient.callTool(\n      new CallToolRequest(\"getWeatherForecastByLocation\",\n          Map.of(\"latitude\", \"47.6062\", \"longitude\", \"-122.3321\")));\n\n    CallToolResult alert = mcpClient.callTool(\n      new CallToolRequest(\"getAlerts\", Map.of(\"state\", \"NY\")));\n\n    mcpClient.closeGracefully();\n    ```\n\n    ### Use MCP Client Boot Starter\n\n    Create a new boot starter application using the `spring-ai-starter-mcp-client` dependency:\n\n    ```xml\n    <dependency>\n        <groupId>org.springframework.ai</groupId>\n        <artifactId>spring-ai-starter-mcp-client</artifactId>\n    </dependency>\n    ```\n\n    and set the `spring.ai.mcp.client.stdio.servers-configuration` property to point to your `claude_desktop_config.json`.\n    You can reuse the existing Anthropic Desktop configuration:\n\n    ```properties\n    spring.ai.mcp.client.stdio.servers-configuration=file:PATH/TO/claude_desktop_config.json\n    ```\n\n    When you start your client application, the auto-configuration will automatically create MCP clients from the claude\\_desktop\\_config.json.\n\n    For more information, see the [MCP Client 
Boot Starters](https://docs.spring.io/spring-ai/reference/api/mcp/mcp-server-boot-client-docs.html) reference documentation.\n\n    ## More Java MCP Server examples\n\n    The [starter-webflux-server](https://github.com/spring-projects/spring-ai-examples/tree/main/model-context-protocol/weather/starter-webflux-server) demonstrates how to create an MCP server using SSE transport.\n    It showcases how to define and register MCP Tools, Resources, and Prompts, using Spring Boot's auto-configuration capabilities.\n  </Tab>\n\n  <Tab title=\"Kotlin\">\n    Let's get started with building our weather server! [You can find the complete code for what we'll be building here.](https://github.com/modelcontextprotocol/kotlin-sdk/tree/main/samples/weather-stdio-server)\n\n    ### Prerequisite knowledge\n\n    This quickstart assumes you have familiarity with:\n\n    * Kotlin\n    * LLMs like Claude\n\n    ### System requirements\n\n    * Java 17 or higher installed.\n\n    ### Set up your environment\n\n    First, let's install `java` and `gradle` if you haven't already.\n    You can download `java` from the [official Oracle JDK website](https://www.oracle.com/java/technologies/downloads/).\n    Verify your `java` installation:\n\n    ```bash\n    java --version\n    ```\n\n    Now, let's create and set up your project:\n\n    <CodeGroup>\n      ```bash MacOS/Linux\n      # Create a new directory for our project\n      mkdir weather\n      cd weather\n\n      # Initialize a new kotlin project\n      gradle init\n      ```\n\n      ```powershell Windows\n      # Create a new directory for our project\n      md weather\n      cd weather\n\n      # Initialize a new kotlin project\n      gradle init\n      ```\n    </CodeGroup>\n\n    After running `gradle init`, you will be presented with options for creating your project.\n    Select **Application** as the project type, **Kotlin** as the programming language, and **Java 17** as the Java version.\n\n    Alternatively, you can 
create a Kotlin application using the [IntelliJ IDEA project wizard](https://kotlinlang.org/docs/jvm-get-started.html).\n\n    After creating the project, add the following dependencies:\n\n    <CodeGroup>\n      ```kotlin build.gradle.kts\n      val mcpVersion = \"0.4.0\"\n      val slf4jVersion = \"2.0.9\"\n      val ktorVersion = \"3.1.1\"\n\n      dependencies {\n          implementation(\"io.modelcontextprotocol:kotlin-sdk:$mcpVersion\")\n          implementation(\"org.slf4j:slf4j-nop:$slf4jVersion\")\n          implementation(\"io.ktor:ktor-client-content-negotiation:$ktorVersion\")\n          implementation(\"io.ktor:ktor-serialization-kotlinx-json:$ktorVersion\")\n      }\n      ```\n\n      ```groovy build.gradle\n      def mcpVersion = '0.3.0'\n      def slf4jVersion = '2.0.9'\n      def ktorVersion = '3.1.1'\n\n      dependencies {\n          implementation \"io.modelcontextprotocol:kotlin-sdk:$mcpVersion\"\n          implementation \"org.slf4j:slf4j-nop:$slf4jVersion\"\n          implementation \"io.ktor:ktor-client-content-negotiation:$ktorVersion\"\n          implementation \"io.ktor:ktor-serialization-kotlinx-json:$ktorVersion\"\n      }\n      ```\n    </CodeGroup>\n\n    Also, add the following plugins to your build script:\n\n    <CodeGroup>\n      ```kotlin build.gradle.kts\n      plugins {\n          kotlin(\"plugin.serialization\") version \"your_version_of_kotlin\"\n          id(\"com.github.johnrengelman.shadow\") version \"8.1.1\"\n      }\n      ```\n\n      ```groovy build.gradle\n      plugins {\n          id 'org.jetbrains.kotlin.plugin.serialization' version 'your_version_of_kotlin'\n          id 'com.github.johnrengelman.shadow' version '8.1.1'\n      }\n      ```\n    </CodeGroup>\n\n    Now let’s dive into building your server.\n\n    ## Building your server\n\n    ### Setting up the instance\n\n    Add a server initialization function:\n\n    ```kotlin\n    // Main function to run the MCP server\n    fun `run mcp server`() {\n       
 // Create the MCP Server instance with a basic implementation\n        val server = Server(\n            Implementation(\n                name = \"weather\", // Tool name is \"weather\"\n                version = \"1.0.0\" // Version of the implementation\n            ),\n            ServerOptions(\n                capabilities = ServerCapabilities(tools = ServerCapabilities.Tools(listChanged = true))\n            )\n        )\n\n        // Create a transport using standard IO for server communication\n        val transport = StdioServerTransport(\n            System.`in`.asInput(),\n            System.out.asSink().buffered()\n        )\n\n        runBlocking {\n            server.connect(transport)\n            val done = Job()\n            server.onClose {\n                done.complete()\n            }\n            done.join()\n        }\n    }\n    ```\n\n    ### Weather API helper functions\n\n    Next, let's add functions and data classes for querying and converting responses from the National Weather Service API:\n\n    ```kotlin\n    // Extension function to fetch forecast information for given latitude and longitude\n    suspend fun HttpClient.getForecast(latitude: Double, longitude: Double): List<String> {\n        val points = this.get(\"/points/$latitude,$longitude\").body<Points>()\n        val forecast = this.get(points.properties.forecast).body<Forecast>()\n        return forecast.properties.periods.map { period ->\n            \"\"\"\n                ${period.name}:\n                Temperature: ${period.temperature} ${period.temperatureUnit}\n                Wind: ${period.windSpeed} ${period.windDirection}\n                Forecast: ${period.detailedForecast}\n            \"\"\".trimIndent()\n        }\n    }\n\n    // Extension function to fetch weather alerts for a given state\n    suspend fun HttpClient.getAlerts(state: String): List<String> {\n        val alerts = this.get(\"/alerts/active/area/$state\").body<Alert>()\n        return 
alerts.features.map { feature ->\n            \"\"\"\n                Event: ${feature.properties.event}\n                Area: ${feature.properties.areaDesc}\n                Severity: ${feature.properties.severity}\n                Description: ${feature.properties.description}\n                Instruction: ${feature.properties.instruction}\n            \"\"\".trimIndent()\n        }\n    }\n\n    @Serializable\n    data class Points(\n        val properties: Properties\n    ) {\n        @Serializable\n        data class Properties(val forecast: String)\n    }\n\n    @Serializable\n    data class Forecast(\n        val properties: Properties\n    ) {\n        @Serializable\n        data class Properties(val periods: List<Period>)\n\n        @Serializable\n        data class Period(\n            val number: Int, val name: String, val startTime: String, val endTime: String,\n            val isDaytime: Boolean, val temperature: Int, val temperatureUnit: String,\n            val temperatureTrend: String, val probabilityOfPrecipitation: JsonObject,\n            val windSpeed: String, val windDirection: String,\n            val shortForecast: String, val detailedForecast: String,\n        )\n    }\n\n    @Serializable\n    data class Alert(\n        val features: List<Feature>\n    ) {\n        @Serializable\n        data class Feature(\n            val properties: Properties\n        )\n\n        @Serializable\n        data class Properties(\n            val event: String, val areaDesc: String, val severity: String,\n            val description: String, val instruction: String?,\n        )\n    }\n    ```\n\n    ### Implementing tool execution\n\n    The tool execution handler is responsible for actually executing the logic of each tool. 
Let's add it:\n\n    ```kotlin\n    // Create an HTTP client with a default request configuration and JSON content negotiation\n    val httpClient = HttpClient {\n        defaultRequest {\n            url(\"https://api.weather.gov\")\n            headers {\n                append(\"Accept\", \"application/geo+json\")\n                append(\"User-Agent\", \"WeatherApiClient/1.0\")\n            }\n            contentType(ContentType.Application.Json)\n        }\n        // Install content negotiation plugin for JSON serialization/deserialization\n        install(ContentNegotiation) { json(Json { ignoreUnknownKeys = true }) }\n    }\n\n    // Register a tool to fetch weather alerts by state\n    server.addTool(\n        name = \"get_alerts\",\n        description = \"\"\"\n            Get weather alerts for a US state. Input is Two-letter US state code (e.g. CA, NY)\n        \"\"\".trimIndent(),\n        inputSchema = Tool.Input(\n            properties = buildJsonObject {\n                putJsonObject(\"state\") {\n                    put(\"type\", \"string\")\n                    put(\"description\", \"Two-letter US state code (e.g. 
CA, NY)\")\n                }\n            },\n            required = listOf(\"state\")\n        )\n    ) { request ->\n        val state = request.arguments[\"state\"]?.jsonPrimitive?.content\n        if (state == null) {\n            return@addTool CallToolResult(\n                content = listOf(TextContent(\"The 'state' parameter is required.\"))\n            )\n        }\n\n        val alerts = httpClient.getAlerts(state)\n\n        CallToolResult(content = alerts.map { TextContent(it) })\n    }\n\n    // Register a tool to fetch weather forecast by latitude and longitude\n    server.addTool(\n        name = \"get_forecast\",\n        description = \"\"\"\n            Get weather forecast for a specific latitude/longitude\n        \"\"\".trimIndent(),\n        inputSchema = Tool.Input(\n            properties = buildJsonObject {\n                putJsonObject(\"latitude\") { put(\"type\", \"number\") }\n                putJsonObject(\"longitude\") { put(\"type\", \"number\") }\n            },\n            required = listOf(\"latitude\", \"longitude\")\n        )\n    ) { request ->\n        val latitude = request.arguments[\"latitude\"]?.jsonPrimitive?.doubleOrNull\n        val longitude = request.arguments[\"longitude\"]?.jsonPrimitive?.doubleOrNull\n        if (latitude == null || longitude == null) {\n            return@addTool CallToolResult(\n                content = listOf(TextContent(\"The 'latitude' and 'longitude' parameters are required.\"))\n            )\n        }\n\n        val forecast = httpClient.getForecast(latitude, longitude)\n\n        CallToolResult(content = forecast.map { TextContent(it) })\n    }\n    ```\n\n    ### Running the server\n\n    Finally, implement the main function to run the server:\n\n    ```kotlin\n    fun main() = `run mcp server`()\n    ```\n\n    Make sure to run `./gradlew build` to build your server. 
This is a very important step in getting your server to connect.\n\n    Let's now test your server from an existing MCP host, Claude for Desktop.\n\n    ## Testing your server with Claude for Desktop\n\n    <Note>\n      Claude for Desktop is not yet available on Linux. Linux users can proceed to the [Building a client](/quickstart/client) tutorial to build an MCP client that connects to the server we just built.\n    </Note>\n\n    First, make sure you have Claude for Desktop installed. [You can install the latest version\n    here.](https://claude.ai/download) If you already have Claude for Desktop, **make sure it's updated to the latest version.**\n\n    We'll need to configure Claude for Desktop for whichever MCP servers you want to use.\n    To do this, open your Claude for Desktop App configuration at `~/Library/Application Support/Claude/claude_desktop_config.json` in a text editor.\n    Make sure to create the file if it doesn't exist.\n\n    For example, if you have [VS Code](https://code.visualstudio.com/) installed:\n\n    <CodeGroup>\n      ```bash MacOS/Linux\n      code ~/Library/Application\\ Support/Claude/claude_desktop_config.json\n      ```\n\n      ```powershell Windows\n      code $env:AppData\\Claude\\claude_desktop_config.json\n      ```\n    </CodeGroup>\n\n    You'll then add your servers in the `mcpServers` key.\n    The MCP UI elements will only show up in Claude for Desktop if at least one server is properly configured.\n\n    In this case, we'll add our single weather server like so:\n\n    <CodeGroup>\n      ```json MacOS/Linux\n      {\n        \"mcpServers\": {\n          \"weather\": {\n            \"command\": \"java\",\n            \"args\": [\n              \"-jar\",\n              \"/ABSOLUTE/PATH/TO/PARENT/FOLDER/weather/build/libs/weather-0.1.0-all.jar\"\n            ]\n          }\n        }\n      }\n      ```\n\n      ```json Windows\n      {\n        \"mcpServers\": {\n          \"weather\": {\n            \"command\": 
\"java\",\n            \"args\": [\n              \"-jar\",\n              \"C:\\\\PATH\\\\TO\\\\PARENT\\\\FOLDER\\\\weather\\\\build\\\\libs\\\\weather-0.1.0-all.jar\"\n            ]\n          }\n        }\n      }\n      ```\n    </CodeGroup>\n\n    This tells Claude for Desktop:\n\n    1. There's an MCP server named \"weather\"\n    2. Launch it by running `java -jar /ABSOLUTE/PATH/TO/PARENT/FOLDER/weather/build/libs/weather-0.1.0-all.jar`\n\n    Save the file, and restart **Claude for Desktop**.\n  </Tab>\n\n  <Tab title=\"C#\">\n    Let's get started with building our weather server! [You can find the complete code for what we'll be building here.](https://github.com/modelcontextprotocol/csharp-sdk/tree/main/samples/QuickstartWeatherServer)\n\n    ### Prerequisite knowledge\n\n    This quickstart assumes you have familiarity with:\n\n    * C#\n    * LLMs like Claude\n    * .NET 8 or higher\n\n    ### System requirements\n\n    * [.NET 8 SDK](https://dotnet.microsoft.com/download/dotnet/8.0) or higher installed.\n\n    ### Set up your environment\n\n    First, let's install `dotnet` if you haven't already. You can download `dotnet` from [official Microsoft .NET website](https://dotnet.microsoft.com/download/). 
Verify your `dotnet` installation:\n\n    ```bash\n    dotnet --version\n    ```\n\n    Now, let's create and set up your project:\n\n    <CodeGroup>\n      ```bash MacOS/Linux\n      # Create a new directory for our project\n      mkdir weather\n      cd weather\n      # Initialize a new C# project\n      dotnet new console\n      ```\n\n      ```powershell Windows\n      # Create a new directory for our project\n      mkdir weather\n      cd weather\n      # Initialize a new C# project\n      dotnet new console\n      ```\n    </CodeGroup>\n\n    After running `dotnet new console`, you will be presented with a new C# project.\n    You can open the project in your favorite IDE, such as [Visual Studio](https://visualstudio.microsoft.com/) or [Rider](https://www.jetbrains.com/rider/).\n    Alternatively, you can create a C# application using the [Visual Studio project wizard](https://learn.microsoft.com/en-us/visualstudio/get-started/csharp/tutorial-console?view=vs-2022).\n    After creating the project, add NuGet package for the Model Context Protocol SDK and hosting:\n\n    ```bash\n    # Add the Model Context Protocol SDK NuGet package\n    dotnet add package ModelContextProtocol --prerelease\n    # Add the .NET Hosting NuGet package\n    dotnet add package Microsoft.Extensions.Hosting\n    ```\n\n    Now let’s dive into building your server.\n\n    ## Building your server\n\n    Open the `Program.cs` file in your project and replace its contents with the following code:\n\n    ```csharp\n    using Microsoft.Extensions.DependencyInjection;\n    using Microsoft.Extensions.Hosting;\n    using ModelContextProtocol;\n    using System.Net.Http.Headers;\n\n    var builder = Host.CreateEmptyApplicationBuilder(settings: null);\n\n    builder.Services.AddMcpServer()\n        .WithStdioServerTransport()\n        .WithToolsFromAssembly();\n\n    builder.Services.AddSingleton(_ =>\n    {\n        var client = new HttpClient() { BaseAddress = new 
Uri(\"https://api.weather.gov\") };\n        client.DefaultRequestHeaders.UserAgent.Add(new ProductInfoHeaderValue(\"weather-tool\", \"1.0\"));\n        return client;\n    });\n\n    var app = builder.Build();\n\n    await app.RunAsync();\n    ```\n\n    <Note>\n      When creating the `ApplicationHostBuilder`, ensure you use `CreateEmptyApplicationBuilder` instead of `CreateDefaultBuilder`. This ensures that the server does not write any additional messages to the console. This is only necessary for servers using STDIO transport.\n    </Note>\n\n    This code sets up a basic console application that uses the Model Context Protocol SDK to create an MCP server with standard I/O transport.\n\n    ### Weather API helper functions\n\n    Create an extension class for `HttpClient` which helps simplify JSON request handling:\n\n    ```csharp\n    using System.Text.Json;\n\n    internal static class HttpClientExt\n    {\n        public static async Task<JsonDocument> ReadJsonDocumentAsync(this HttpClient client, string requestUri)\n        {\n            using var response = await client.GetAsync(requestUri);\n            response.EnsureSuccessStatusCode();\n            return await JsonDocument.ParseAsync(await response.Content.ReadAsStreamAsync());\n        }\n    }\n    ```\n\n    Next, define a class with the tool execution handlers for querying and converting responses from the National Weather Service API:\n\n    ```csharp\n    using ModelContextProtocol.Server;\n    using System.ComponentModel;\n    using System.Globalization;\n    using System.Text.Json;\n\n    namespace QuickstartWeatherServer.Tools;\n\n    [McpServerToolType]\n    public static class WeatherTools\n    {\n        [McpServerTool, Description(\"Get weather alerts for a US state.\")]\n        public static async Task<string> GetAlerts(\n            HttpClient client,\n            [Description(\"The US state to get alerts for.\")] string state)\n        {\n            using var jsonDocument = await 
client.ReadJsonDocumentAsync($\"/alerts/active/area/{state}\");\n            var jsonElement = jsonDocument.RootElement;\n            var alerts = jsonElement.GetProperty(\"features\").EnumerateArray();\n\n            if (!alerts.Any())\n            {\n                return \"No active alerts for this state.\";\n            }\n\n            return string.Join(\"\\n--\\n\", alerts.Select(alert =>\n            {\n                JsonElement properties = alert.GetProperty(\"properties\");\n                return $\"\"\"\n                        Event: {properties.GetProperty(\"event\").GetString()}\n                        Area: {properties.GetProperty(\"areaDesc\").GetString()}\n                        Severity: {properties.GetProperty(\"severity\").GetString()}\n                        Description: {properties.GetProperty(\"description\").GetString()}\n                        Instruction: {properties.GetProperty(\"instruction\").GetString()}\n                        \"\"\";\n            }));\n        }\n\n        [McpServerTool, Description(\"Get weather forecast for a location.\")]\n        public static async Task<string> GetForecast(\n            HttpClient client,\n            [Description(\"Latitude of the location.\")] double latitude,\n            [Description(\"Longitude of the location.\")] double longitude)\n        {\n            var pointUrl = string.Create(CultureInfo.InvariantCulture, $\"/points/{latitude},{longitude}\");\n            using var jsonDocument = await client.ReadJsonDocumentAsync(pointUrl);\n            var forecastUrl = jsonDocument.RootElement.GetProperty(\"properties\").GetProperty(\"forecast\").GetString()\n                ?? 
throw new Exception($\"No forecast URL provided by {client.BaseAddress}points/{latitude},{longitude}\");\n\n            using var forecastDocument = await client.ReadJsonDocumentAsync(forecastUrl);\n            var periods = forecastDocument.RootElement.GetProperty(\"properties\").GetProperty(\"periods\").EnumerateArray();\n\n            return string.Join(\"\\n---\\n\", periods.Select(period => $\"\"\"\n                    {period.GetProperty(\"name\").GetString()}\n                    Temperature: {period.GetProperty(\"temperature\").GetInt32()}°F\n                    Wind: {period.GetProperty(\"windSpeed\").GetString()} {period.GetProperty(\"windDirection\").GetString()}\n                    Forecast: {period.GetProperty(\"detailedForecast\").GetString()}\n                    \"\"\"));\n        }\n    }\n    ```\n\n    ### Running the server\n\n    Finally, run the server using the following command:\n\n    ```bash\n    dotnet run\n    ```\n\n    This will start the server and listen for incoming requests on standard input/output.\n\n    ## Testing your server with Claude for Desktop\n\n    <Note>\n      Claude for Desktop is not yet available on Linux. Linux users can proceed to the [Building a client](/quickstart/client) tutorial to build an MCP client that connects to the server we just built.\n    </Note>\n\n    First, make sure you have Claude for Desktop installed. [You can install the latest version\n    here.](https://claude.ai/download) If you already have Claude for Desktop, **make sure it's updated to the latest version.**\n    We'll need to configure Claude for Desktop for whichever MCP servers you want to use. To do this, open your Claude for Desktop App configuration at `~/Library/Application Support/Claude/claude_desktop_config.json` in a text editor. 
Make sure to create the file if it doesn't exist.\n    For example, if you have [VS Code](https://code.visualstudio.com/) installed:\n\n    <Tabs>\n      <Tab title=\"MacOS/Linux\">\n        ```bash\n        code ~/Library/Application\\ Support/Claude/claude_desktop_config.json\n        ```\n      </Tab>\n\n      <Tab title=\"Windows\">\n        ```powershell\n        code $env:AppData\\Claude\\claude_desktop_config.json\n        ```\n      </Tab>\n    </Tabs>\n\n    You'll then add your servers in the `mcpServers` key. The MCP UI elements will only show up in Claude for Desktop if at least one server is properly configured.\n    In this case, we'll add our single weather server like so:\n\n    <Tabs>\n      <Tab title=\"MacOS/Linux\">\n        ```json\n        {\n          \"mcpServers\": {\n            \"weather\": {\n              \"command\": \"dotnet\",\n              \"args\": [\"run\", \"--project\", \"/ABSOLUTE/PATH/TO/PROJECT\", \"--no-build\"]\n            }\n          }\n        }\n        ```\n      </Tab>\n\n      <Tab title=\"Windows\">\n        ```json\n        {\n          \"mcpServers\": {\n            \"weather\": {\n              \"command\": \"dotnet\",\n              \"args\": [\n                \"run\",\n                \"--project\",\n                \"C:\\\\ABSOLUTE\\\\PATH\\\\TO\\\\PROJECT\",\n                \"--no-build\"\n              ]\n            }\n          }\n        }\n        ```\n      </Tab>\n    </Tabs>\n\n    This tells Claude for Desktop:\n\n    1. There's an MCP server named \"weather\"\n    2. Launch it by running `dotnet run /ABSOLUTE/PATH/TO/PROJECT`\n       Save the file, and restart **Claude for Desktop**.\n  </Tab>\n</Tabs>\n\n### Test with commands\n\nLet's make sure Claude for Desktop is picking up the two tools we've exposed in our `weather` server. 
You can do this by looking for the \"Search and tools\" <img src=\"https://mintlify.s3.us-west-1.amazonaws.com/mcp/images/claude-desktop-mcp-slider.svg\" style={{display: 'inline', margin: 0, height: '1.3em'}} /> icon:\n\n<Frame>\n  <img src=\"https://mintlify.s3.us-west-1.amazonaws.com/mcp/images/visual-indicator-mcp-tools.png\" />\n</Frame>\n\nAfter clicking on the slider icon, you should see two tools listed:\n\n<Frame>\n  <img src=\"https://mintlify.s3.us-west-1.amazonaws.com/mcp/images/available-mcp-tools.png\" />\n</Frame>\n\nIf your server isn't being picked up by Claude for Desktop, proceed to the [Troubleshooting](#troubleshooting) section for debugging tips.\n\nIf the tool settings icon has shown up, you can now test your server by running the following commands in Claude for Desktop:\n\n* What's the weather in Sacramento?\n* What are the active weather alerts in Texas?\n\n<Frame>\n  <img src=\"https://mintlify.s3.us-west-1.amazonaws.com/mcp/images/current-weather.png\" />\n</Frame>\n\n<Frame>\n  <img src=\"https://mintlify.s3.us-west-1.amazonaws.com/mcp/images/weather-alerts.png\" />\n</Frame>\n\n<Note>\n  Since this is the US National Weather service, the queries will only work for US locations.\n</Note>\n\n## What's happening under the hood\n\nWhen you ask a question:\n\n1. The client sends your question to Claude\n2. Claude analyzes the available tools and decides which one(s) to use\n3. The client executes the chosen tool(s) through the MCP server\n4. The results are sent back to Claude\n5. Claude formulates a natural language response\n6. 
The response is displayed to you!\n\n## Troubleshooting\n\n<AccordionGroup>\n  <Accordion title=\"Claude for Desktop Integration Issues\">\n    **Getting logs from Claude for Desktop**\n\n    Claude.app logging related to MCP is written to log files in `~/Library/Logs/Claude`:\n\n    * `mcp.log` will contain general logging about MCP connections and connection failures.\n    * Files named `mcp-server-SERVERNAME.log` will contain error (stderr) logging from the named server.\n\n    You can run the following command to list recent logs and follow along with any new ones:\n\n    ```bash\n    # Check Claude's logs for errors\n    tail -n 20 -f ~/Library/Logs/Claude/mcp*.log\n    ```\n\n    **Server not showing up in Claude**\n\n    1. Check your `claude_desktop_config.json` file syntax\n    2. Make sure the path to your project is absolute and not relative\n    3. Restart Claude for Desktop completely\n\n    **Tool calls failing silently**\n\n    If Claude attempts to use the tools but they fail:\n\n    1. Check Claude's logs for errors\n    2. Verify your server builds and runs without errors\n    3. Try restarting Claude for Desktop\n\n    **None of this is working. What do I do?**\n\n    Please refer to our [debugging guide](/docs/tools/debugging) for better debugging tools and more detailed guidance.\n  </Accordion>\n\n  <Accordion title=\"Weather API Issues\">\n    **Error: Failed to retrieve grid point data**\n\n    This usually means either:\n\n    1. The coordinates are outside the US\n    2. The NWS API is having issues\n    3. You're being rate limited\n\n    Fix:\n\n    * Verify you're using US coordinates\n    * Add a small delay between requests\n    * Check the NWS API status page\n\n    **Error: No active alerts for \\[STATE]**\n\n    This isn't an error - it just means there are no current weather alerts for that state. 
Try a different state or check during severe weather.\n  </Accordion>\n</AccordionGroup>\n\n<Note>\n  For more advanced troubleshooting, check out our guide on [Debugging MCP](/docs/tools/debugging)\n</Note>\n\n## Next steps\n\n<CardGroup cols={2}>\n  <Card title=\"Building a client\" icon=\"outlet\" href=\"/quickstart/client\">\n    Learn how to build your own MCP client that can connect to your server\n  </Card>\n\n  <Card title=\"Example servers\" icon=\"grid\" href=\"/examples\">\n    Check out our gallery of official MCP servers and implementations\n  </Card>\n\n  <Card title=\"Debugging Guide\" icon=\"bug\" href=\"/docs/tools/debugging\">\n    Learn how to effectively debug MCP servers and integrations\n  </Card>\n\n  <Card title=\"Building MCP with LLMs\" icon=\"comments\" href=\"/tutorials/building-mcp-with-llms\">\n    Learn how to use LLMs like Claude to speed up your MCP development\n  </Card>\n</CardGroup>\n\n\n# For Claude Desktop Users\nSource: https://modelcontextprotocol.io/quickstart/user\n\nGet started using pre-built servers in Claude for Desktop.\n\nIn this tutorial, you will extend [Claude for Desktop](https://claude.ai/download) so that it can read from your computer's file system, write new files, move files, and even search files.\n\n<Frame>\n  <img src=\"https://mintlify.s3.us-west-1.amazonaws.com/mcp/images/quickstart-filesystem.png\" />\n</Frame>\n\nDon't worry — it will ask you for your permission before executing these actions!\n\n## 1. Download Claude for Desktop\n\nStart by downloading [Claude for Desktop](https://claude.ai/download), choosing either macOS or Windows. (Linux is not yet supported for Claude for Desktop.)\n\nFollow the installation instructions.\n\nIf you already have Claude for Desktop, make sure it's on the latest version by clicking on the Claude menu on your computer and selecting \"Check for Updates...\"\n\n## 2. 
Add the Filesystem MCP Server\n\nTo add this filesystem functionality, we will be installing a pre-built [Filesystem MCP Server](https://github.com/modelcontextprotocol/servers/tree/main/src/filesystem) to Claude for Desktop. This is one of several current [reference servers](https://github.com/modelcontextprotocol/servers/tree/main) and many community-created servers.\n\nGet started by opening up the Claude menu on your computer and selecting \"Settings...\" Please note that these are not the Claude Account Settings found in the app window itself.\n\nThis is what it should look like on a Mac:\n\n<Frame style={{ textAlign: \"center\" }}>\n  <img src=\"https://mintlify.s3.us-west-1.amazonaws.com/mcp/images/quickstart-menu.png\" width=\"400\" />\n</Frame>\n\nClick on \"Developer\" in the left-hand bar of the Settings pane, and then click on \"Edit Config\":\n\n<Frame>\n  <img src=\"https://mintlify.s3.us-west-1.amazonaws.com/mcp/images/quickstart-developer.png\" />\n</Frame>\n\nThis will create a configuration file at:\n\n* macOS: `~/Library/Application Support/Claude/claude_desktop_config.json`\n* Windows: `%APPDATA%\\Claude\\claude_desktop_config.json`\n\nif you don't already have one, and will display the file in your file system.\n\nOpen up the configuration file in any text editor. 
Replace the file contents with this:\n\n<Tabs>\n  <Tab title=\"MacOS/Linux\">\n    ```json\n    {\n      \"mcpServers\": {\n        \"filesystem\": {\n          \"command\": \"npx\",\n          \"args\": [\n            \"-y\",\n            \"@modelcontextprotocol/server-filesystem\",\n            \"/Users/username/Desktop\",\n            \"/Users/username/Downloads\"\n          ]\n        }\n      }\n    }\n    ```\n  </Tab>\n\n  <Tab title=\"Windows\">\n    ```json\n    {\n      \"mcpServers\": {\n        \"filesystem\": {\n          \"command\": \"npx\",\n          \"args\": [\n            \"-y\",\n            \"@modelcontextprotocol/server-filesystem\",\n            \"C:\\\\Users\\\\username\\\\Desktop\",\n            \"C:\\\\Users\\\\username\\\\Downloads\"\n          ]\n        }\n      }\n    }\n    ```\n  </Tab>\n</Tabs>\n\nMake sure to replace `username` with your computer's username. The paths should point to valid directories that you want Claude to be able to access and modify. It's set up to work for Desktop and Downloads, but you can add more paths as well.\n\nYou will also need [Node.js](https://nodejs.org) on your computer for this to run properly. To verify you have Node installed, open the command line on your computer.\n\n* On macOS, open the Terminal from your Applications folder\n* On Windows, press Windows + R, type \"cmd\", and press Enter\n\nOnce in the command line, verify you have Node installed by entering in the following command:\n\n```bash\nnode --version\n```\n\nIf you get an error saying \"command not found\" or \"node is not recognized\", download Node from [nodejs.org](https://nodejs.org/).\n\n<Tip>\n  **How does the configuration file work?**\n\n  This configuration file tells Claude for Desktop which MCP servers to start up every time you start the application. In this case, we have added one server called \"filesystem\" that will use the Node `npx` command to install and run `@modelcontextprotocol/server-filesystem`. 
This server, described [here](https://github.com/modelcontextprotocol/servers/tree/main/src/filesystem), will let you access your file system in Claude for Desktop.\n</Tip>\n\n<Warning>\n  **Command Privileges**\n\n  Claude for Desktop will run the commands in the configuration file with the permissions of your user account, and access to your local files. Only add commands if you understand and trust the source.\n</Warning>\n\n## 3. Restart Claude\n\nAfter updating your configuration file, you need to restart Claude for Desktop.\n\nUpon restarting, you should see a slider <img src=\"https://mintlify.s3.us-west-1.amazonaws.com/mcp/images/claude-desktop-mcp-slider.svg\" style={{display: 'inline', margin: 0, height: '1.3em'}} /> icon in the bottom left corner of the input box:\n\n<Frame>\n  <img src=\"https://mintlify.s3.us-west-1.amazonaws.com/mcp/images/quickstart-slider.png\" />\n</Frame>\n\nAfter clicking on the slider icon, you should see the tools that come with the Filesystem MCP Server:\n\n<Frame style={{ textAlign: \"center\" }}>\n  <img src=\"https://mintlify.s3.us-west-1.amazonaws.com/mcp/images/quickstart-tools.png\" width=\"400\" />\n</Frame>\n\nIf your server isn't being picked up by Claude for Desktop, proceed to the [Troubleshooting](#troubleshooting) section for debugging tips.\n\n## 4. Try it out!\n\nYou can now talk to Claude and ask it about your filesystem. 
It should know when to call the relevant tools.\n\nThings you might try asking Claude:\n\n* Can you write a poem and save it to my desktop?\n* What are some work-related files in my downloads folder?\n* Can you take all the images on my desktop and move them to a new folder called \"Images\"?\n\nAs needed, Claude will call the relevant tools and seek your approval before taking an action:\n\n<Frame style={{ textAlign: \"center\" }}>\n  <img src=\"https://mintlify.s3.us-west-1.amazonaws.com/mcp/images/quickstart-approve.png\" width=\"500\" />\n</Frame>\n\n## Troubleshooting\n\n<AccordionGroup>\n  <Accordion title=\"Server not showing up in Claude / hammer icon missing\">\n    1. Restart Claude for Desktop completely\n    2. Check your `claude_desktop_config.json` file syntax\n    3. Make sure the file paths included in `claude_desktop_config.json` are valid and that they are absolute and not relative\n    4. Look at [logs](#getting-logs-from-claude-for-desktop) to see why the server is not connecting\n    5. 
In your command line, try manually running the server (replacing `username` as you did in `claude_desktop_config.json`) to see if you get any errors:\n\n    <Tabs>\n      <Tab title=\"MacOS/Linux\">\n        ```bash\n        npx -y @modelcontextprotocol/server-filesystem /Users/username/Desktop /Users/username/Downloads\n        ```\n      </Tab>\n\n      <Tab title=\"Windows\">\n        ```bash\n        npx -y @modelcontextprotocol/server-filesystem C:\\Users\\username\\Desktop C:\\Users\\username\\Downloads\n        ```\n      </Tab>\n    </Tabs>\n  </Accordion>\n\n  <Accordion title=\"Getting logs from Claude for Desktop\">\n    Claude.app logging related to MCP is written to log files in:\n\n    * macOS: `~/Library/Logs/Claude`\n\n    * Windows: `%APPDATA%\\Claude\\logs`\n\n    * `mcp.log` will contain general logging about MCP connections and connection failures.\n\n    * Files named `mcp-server-SERVERNAME.log` will contain error (stderr) logging from the named server.\n\n    You can run the following command to list recent logs and follow along with any new ones (on Windows, it will only show recent logs):\n\n    <Tabs>\n      <Tab title=\"MacOS/Linux\">\n        ```bash\n        # Check Claude's logs for errors\n        tail -n 20 -f ~/Library/Logs/Claude/mcp*.log\n        ```\n      </Tab>\n\n      <Tab title=\"Windows\">\n        ```bash\n        type \"%APPDATA%\\Claude\\logs\\mcp*.log\"\n        ```\n      </Tab>\n    </Tabs>\n  </Accordion>\n\n  <Accordion title=\"Tool calls failing silently\">\n    If Claude attempts to use the tools but they fail:\n\n    1. Check Claude's logs for errors\n    2. Verify your server builds and runs without errors\n    3. Try restarting Claude for Desktop\n  </Accordion>\n\n  <Accordion title=\"None of this is working. 
What do I do?\">\n    Please refer to our [debugging guide](/docs/tools/debugging) for better debugging tools and more detailed guidance.\n  </Accordion>\n\n  <Accordion title=\"ENOENT error and `${APPDATA}` in paths on Windows\">\n    If your configured server fails to load, and you see within its logs an error referring to `${APPDATA}` within a path, you may need to add the expanded value of `%APPDATA%` to your `env` key in `claude_desktop_config.json`:\n\n    ```json\n    {\n      \"brave-search\": {\n        \"command\": \"npx\",\n        \"args\": [\"-y\", \"@modelcontextprotocol/server-brave-search\"],\n        \"env\": {\n          \"APPDATA\": \"C:\\\\Users\\\\user\\\\AppData\\\\Roaming\\\\\",\n          \"BRAVE_API_KEY\": \"...\"\n        }\n      }\n    }\n    ```\n\n    With this change in place, launch Claude Desktop once again.\n\n    <Warning>\n      **NPM should be installed globally**\n\n      The `npx` command may continue to fail if you have not installed NPM globally. If NPM is already installed globally, you will find `%APPDATA%\\npm` exists on your system. If not, you can install NPM globally by running the following command:\n\n      ```bash\n      npm install -g npm\n      ```\n    </Warning>\n  </Accordion>\n</AccordionGroup>\n\n## Next steps\n\n<CardGroup cols={2}>\n  <Card title=\"Explore other servers\" icon=\"grid\" href=\"/examples\">\n    Check out our gallery of official MCP servers and implementations\n  </Card>\n\n  <Card title=\"Build your own server\" icon=\"code\" href=\"/quickstart/server\">\n    Now build your own custom server to use in Claude for Desktop and other\n    clients\n  </Card>\n</CardGroup>\n\n\n# Architecture\nSource: https://modelcontextprotocol.io/specification/2025-06-18/architecture/index\n\n\n\n<div id=\"enable-section-numbers\" />\n\nThe Model Context Protocol (MCP) follows a client-host-server architecture where each\nhost can run multiple client instances. 
This architecture enables users to integrate AI\ncapabilities across applications while maintaining clear security boundaries and\nisolating concerns. Built on JSON-RPC, MCP provides a stateful session protocol focused\non context exchange and sampling coordination between clients and servers.\n\n## Core Components\n\n```mermaid\ngraph LR\n    subgraph \"Application Host Process\"\n        H[Host]\n        C1[Client 1]\n        C2[Client 2]\n        C3[Client 3]\n        H --> C1\n        H --> C2\n        H --> C3\n    end\n\n    subgraph \"Local machine\"\n        S1[Server 1<br>Files & Git]\n        S2[Server 2<br>Database]\n        R1[(\"Local<br>Resource A\")]\n        R2[(\"Local<br>Resource B\")]\n\n        C1 --> S1\n        C2 --> S2\n        S1 <--> R1\n        S2 <--> R2\n    end\n\n    subgraph \"Internet\"\n        S3[Server 3<br>External APIs]\n        R3[(\"Remote<br>Resource C\")]\n\n        C3 --> S3\n        S3 <--> R3\n    end\n```\n\n### Host\n\nThe host process acts as the container and coordinator:\n\n* Creates and manages multiple client instances\n* Controls client connection permissions and lifecycle\n* Enforces security policies and consent requirements\n* Handles user authorization decisions\n* Coordinates AI/LLM integration and sampling\n* Manages context aggregation across clients\n\n### Clients\n\nEach client is created by the host and maintains an isolated server connection:\n\n* Establishes one stateful session per server\n* Handles protocol negotiation and capability exchange\n* Routes protocol messages bidirectionally\n* Manages subscriptions and notifications\n* Maintains security boundaries between servers\n\nA host application creates and manages multiple clients, with each client having a 1:1\nrelationship with a particular server.\n\n### Servers\n\nServers provide specialized context and capabilities:\n\n* Expose resources, tools and prompts via MCP primitives\n* Operate independently with focused responsibilities\n* Request 
sampling through client interfaces\n* Must respect security constraints\n* Can be local processes or remote services\n\n## Design Principles\n\nMCP is built on several key design principles that inform its architecture and\nimplementation:\n\n1. **Servers should be extremely easy to build**\n\n   * Host applications handle complex orchestration responsibilities\n   * Servers focus on specific, well-defined capabilities\n   * Simple interfaces minimize implementation overhead\n   * Clear separation enables maintainable code\n\n2. **Servers should be highly composable**\n\n   * Each server provides focused functionality in isolation\n   * Multiple servers can be combined seamlessly\n   * Shared protocol enables interoperability\n   * Modular design supports extensibility\n\n3. **Servers should not be able to read the whole conversation, nor \"see into\" other\n   servers**\n\n   * Servers receive only necessary contextual information\n   * Full conversation history stays with the host\n   * Each server connection maintains isolation\n   * Cross-server interactions are controlled by the host\n   * Host process enforces security boundaries\n\n4. **Features can be added to servers and clients progressively**\n   * Core protocol provides minimal required functionality\n   * Additional capabilities can be negotiated as needed\n   * Servers and clients evolve independently\n   * Protocol designed for future extensibility\n   * Backwards compatibility is maintained\n\n## Capability Negotiation\n\nThe Model Context Protocol uses a capability-based negotiation system where clients and\nservers explicitly declare their supported features during initialization. 
Capabilities\ndetermine which protocol features and primitives are available during a session.\n\n* Servers declare capabilities like resource subscriptions, tool support, and prompt\n  templates\n* Clients declare capabilities like sampling support and notification handling\n* Both parties must respect declared capabilities throughout the session\n* Additional capabilities can be negotiated through extensions to the protocol\n\n```mermaid\nsequenceDiagram\n    participant Host\n    participant Client\n    participant Server\n\n    Host->>+Client: Initialize client\n    Client->>+Server: Initialize session with capabilities\n    Server-->>Client: Respond with supported capabilities\n\n    Note over Host,Server: Active Session with Negotiated Features\n\n    loop Client Requests\n        Host->>Client: User- or model-initiated action\n        Client->>Server: Request (tools/resources)\n        Server-->>Client: Response\n        Client-->>Host: Update UI or respond to model\n    end\n\n    loop Server Requests\n        Server->>Client: Request (sampling)\n        Client->>Host: Forward to AI\n        Host-->>Client: AI response\n        Client-->>Server: Response\n    end\n\n    loop Notifications\n        Server--)Client: Resource updates\n        Client--)Server: Status changes\n    end\n\n    Host->>Client: Terminate\n    Client->>-Server: End session\n    deactivate Server\n```\n\nEach capability unlocks specific protocol features for use during the session. 
For\nexample:\n\n* Implemented [server features](/specification/2025-06-18/server) must be advertised in the\n  server's capabilities\n* Emitting resource subscription notifications requires the server to declare\n  subscription support\n* Tool invocation requires the server to declare tool capabilities\n* [Sampling](/specification/2025-06-18/client) requires the client to declare support in its\n  capabilities\n\nThis capability negotiation ensures clients and servers have a clear understanding of\nsupported functionality while maintaining protocol extensibility.\n\n\n# Authorization\nSource: https://modelcontextprotocol.io/specification/2025-06-18/basic/authorization\n\n\n\n<div id=\"enable-section-numbers\" />\n\n<Info>**Protocol Revision**: 2025-06-18</Info>\n\n## Introduction\n\n### Purpose and Scope\n\nThe Model Context Protocol provides authorization capabilities at the transport level,\nenabling MCP clients to make requests to restricted MCP servers on behalf of resource\nowners. This specification defines the authorization flow for HTTP-based transports.\n\n### Protocol Requirements\n\nAuthorization is **OPTIONAL** for MCP implementations. 
When supported:\n\n* Implementations using an HTTP-based transport **SHOULD** conform to this specification.\n* Implementations using an STDIO transport **SHOULD NOT** follow this specification, and\n  instead retrieve credentials from the environment.\n* Implementations using alternative transports **MUST** follow established security best\n  practices for their protocol.\n\n### Standards Compliance\n\nThis authorization mechanism is based on established specifications listed below, but\nimplements a selected subset of their features to ensure security and interoperability\nwhile maintaining simplicity:\n\n* OAuth 2.1 IETF DRAFT ([draft-ietf-oauth-v2-1-12](https://datatracker.ietf.org/doc/html/draft-ietf-oauth-v2-1-12))\n* OAuth 2.0 Authorization Server Metadata\n  ([RFC8414](https://datatracker.ietf.org/doc/html/rfc8414))\n* OAuth 2.0 Dynamic Client Registration Protocol\n  ([RFC7591](https://datatracker.ietf.org/doc/html/rfc7591))\n* OAuth 2.0 Protected Resource Metadata ([RFC9728](https://datatracker.ietf.org/doc/html/rfc9728))\n\n## Authorization Flow\n\n### Roles\n\nA protected *MCP server* acts as an [OAuth 2.1 resource server](https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-12.html#name-roles),\ncapable of accepting and responding to protected resource requests using access tokens.\n\nAn *MCP client* acts as an [OAuth 2.1 client](https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-12.html#name-roles),\nmaking protected resource requests on behalf of a resource owner.\n\nThe *authorization server* is responsible for interacting with the user (if necessary) and issuing access tokens for use at the MCP server.\nThe implementation details of the authorization server are beyond the scope of this specification. It may be hosted with the\nresource server or a separate entity. 
The [Authorization Server Discovery section](#authorization-server-discovery)\nspecifies how an MCP server indicates the location of its corresponding authorization server to a client.\n\n### Overview\n\n1. Authorization servers **MUST** implement OAuth 2.1 with appropriate security\n   measures for both confidential and public clients.\n\n2. Authorization servers and MCP clients **SHOULD** support the OAuth 2.0 Dynamic Client Registration\n   Protocol ([RFC7591](https://datatracker.ietf.org/doc/html/rfc7591)).\n\n3. MCP servers **MUST** implement OAuth 2.0 Protected Resource Metadata ([RFC9728](https://datatracker.ietf.org/doc/html/rfc9728)).\n   MCP clients **MUST** use OAuth 2.0 Protected Resource Metadata for authorization server discovery.\n\n4. Authorization servers **MUST** provide OAuth 2.0 Authorization\n   Server Metadata ([RFC8414](https://datatracker.ietf.org/doc/html/rfc8414)).\n   MCP clients **MUST** use the OAuth 2.0 Authorization Server Metadata.\n\n### Authorization Server Discovery\n\nThis section describes the mechanisms by which MCP servers advertise their associated\nauthorization servers to MCP clients, as well as the discovery process through which MCP\nclients can determine authorization server endpoints and supported capabilities.\n\n#### Authorization Server Location\n\nMCP servers **MUST** implement the OAuth 2.0 Protected Resource Metadata ([RFC9728](https://datatracker.ietf.org/doc/html/rfc9728))\nspecification to indicate the locations of authorization servers. 
The Protected Resource Metadata document returned by the MCP server **MUST** include\nthe `authorization_servers` field containing at least one authorization server.\n\nThe specific use of `authorization_servers` is beyond the scope of this specification; implementers should consult\nOAuth 2.0 Protected Resource Metadata ([RFC9728](https://datatracker.ietf.org/doc/html/rfc9728)) for\nguidance on implementation details.\n\nImplementors should note that Protected Resource Metadata documents can define multiple authorization servers. The responsibility for selecting which authorization server to use lies with the MCP client, following the guidelines specified in\n[RFC9728 Section 7.6 \"Authorization Servers\"](https://datatracker.ietf.org/doc/html/rfc9728#name-authorization-servers).\n\nMCP servers **MUST** use the HTTP header `WWW-Authenticate` when returning a *401 Unauthorized* to indicate the location of the resource server metadata URL\nas described in [RFC9728 Section 5.1 \"WWW-Authenticate Response\"](https://datatracker.ietf.org/doc/html/rfc9728#name-www-authenticate-response).\n\nMCP clients **MUST** be able to parse `WWW-Authenticate` headers and respond appropriately to `HTTP 401 Unauthorized` responses from the MCP server.\n\n#### Server Metadata Discovery\n\nMCP clients **MUST** follow the OAuth 2.0 Authorization Server Metadata [RFC8414](https://datatracker.ietf.org/doc/html/rfc8414)\nspecification to obtain the information required to interact with the authorization server.\n\n#### Sequence Diagram\n\nThe following diagram outlines an example flow:\n\n```mermaid\nsequenceDiagram\n    participant C as Client\n    participant M as MCP Server (Resource Server)\n    participant A as Authorization Server\n\n    C->>M: MCP request without token\n    M-->>C: HTTP 401 Unauthorized with WWW-Authenticate header\n    Note over C: Extract resource_metadata<br />from WWW-Authenticate\n\n    C->>M: GET /.well-known/oauth-protected-resource\n    M-->>C: Resource 
metadata with authorization server URL\n    Note over C: Validate RS metadata,<br />build AS metadata URL\n\n    C->>A: GET /.well-known/oauth-authorization-server\n    A-->>C: Authorization server metadata\n\n    Note over C,A: OAuth 2.1 authorization flow happens here\n\n    C->>A: Token request\n    A-->>C: Access token\n\n    C->>M: MCP request with access token\n    M-->>C: MCP response\n    Note over C,M: MCP communication continues with valid token\n```\n\n### Dynamic Client Registration\n\nMCP clients and authorization servers **SHOULD** support the\nOAuth 2.0 Dynamic Client Registration Protocol [RFC7591](https://datatracker.ietf.org/doc/html/rfc7591)\nto allow MCP clients to obtain OAuth client IDs without user interaction. This provides a\nstandardized way for clients to automatically register with new authorization servers, which is crucial\nfor MCP because:\n\n* Clients may not know all possible MCP servers and their authorization servers in advance.\n* Manual registration would create friction for users.\n* It enables seamless connection to new MCP servers and their authorization servers.\n* Authorization servers can implement their own registration policies.\n\nAny authorization servers that *do not* support Dynamic Client Registration need to provide\nalternative ways to obtain a client ID (and, if applicable, client credentials). For one of\nthese authorization servers, MCP clients will have to either:\n\n1. Hardcode a client ID (and, if applicable, client credentials) specifically for the MCP client to use when\n   interacting with that authorization server, or\n2. 
Present a UI to users that allows them to enter these details, after registering an\n   OAuth client themselves (e.g., through a configuration interface hosted by the\n   server).\n\n### Authorization Flow Steps\n\nThe complete Authorization flow proceeds as follows:\n\n```mermaid\nsequenceDiagram\n    participant B as User-Agent (Browser)\n    participant C as Client\n    participant M as MCP Server (Resource Server)\n    participant A as Authorization Server\n\n    C->>M: MCP request without token\n    M->>C: HTTP 401 Unauthorized with WWW-Authenticate header\n    Note over C: Extract resource_metadata URL from WWW-Authenticate\n\n    C->>M: Request Protected Resource Metadata\n    M->>C: Return metadata\n\n    Note over C: Parse metadata and extract authorization server(s)<br/>Client determines AS to use\n\n    C->>A: GET /.well-known/oauth-authorization-server\n    A->>C: Authorization server metadata response\n\n    alt Dynamic client registration\n        C->>A: POST /register\n        A->>C: Client Credentials\n    end\n\n    Note over C: Generate PKCE parameters<br/>Include resource parameter\n    C->>B: Open browser with authorization URL + code_challenge + resource\n    B->>A: Authorization request with resource parameter\n    Note over A: User authorizes\n    A->>B: Redirect to callback with authorization code\n    B->>C: Authorization code callback\n    C->>A: Token request + code_verifier + resource\n    A->>C: Access token (+ refresh token)\n    C->>M: MCP request with access token\n    M-->>C: MCP response\n    Note over C,M: MCP communication continues with valid token\n```\n\n#### Resource Parameter Implementation\n\nMCP clients **MUST** implement Resource Indicators for OAuth 2.0 as defined in [RFC 8707](https://www.rfc-editor.org/rfc/rfc8707.html)\nto explicitly specify the target resource for which the token is being requested. The `resource` parameter:\n\n1. **MUST** be included in both authorization requests and token requests.\n2. 
**MUST** identify the MCP server that the client intends to use the token with.\n3. **MUST** use the canonical URI of the MCP server as defined in [RFC 8707 Section 2](https://www.rfc-editor.org/rfc/rfc8707.html#name-access-token-request).\n\n##### Canonical Server URI\n\nFor the purposes of this specification, the canonical URI of an MCP server is defined as the resource identifier as specified in\n[RFC 8707 Section 2](https://www.rfc-editor.org/rfc/rfc8707.html#section-2) and aligns with the `resource` parameter in\n[RFC 9728](https://datatracker.ietf.org/doc/html/rfc9728).\n\nMCP clients **SHOULD** provide the most specific URI that they can for the MCP server they intend to access, following the guidance in [RFC 8707](https://www.rfc-editor.org/rfc/rfc8707). While the canonical form uses lowercase scheme and host components, implementations **SHOULD** accept uppercase scheme and host components for robustness and interoperability.\n\nExamples of valid canonical URIs:\n\n* `https://mcp.example.com/mcp`\n* `https://mcp.example.com`\n* `https://mcp.example.com:8443`\n* `https://mcp.example.com/server/mcp` (when path component is necessary to identify individual MCP server)\n\nExamples of invalid canonical URIs:\n\n* `mcp.example.com` (missing scheme)\n* `https://mcp.example.com#fragment` (contains fragment)\n\n> **Note:** While both `https://mcp.example.com/` (with trailing slash) and `https://mcp.example.com` (without trailing slash) are technically valid absolute URIs according to [RFC 3986](https://www.rfc-editor.org/rfc/rfc3986), implementations **SHOULD** consistently use the form without the trailing slash for better interoperability unless the trailing slash is semantically significant for the specific resource.\n\nFor example, if accessing an MCP server at `https://mcp.example.com`, the authorization request would include:\n\n```\n&resource=https%3A%2F%2Fmcp.example.com\n```\n\nMCP clients **MUST** send this parameter regardless of whether authorization 
servers support it.\n\n### Access Token Usage\n\n#### Token Requirements\n\nAccess token handling when making requests to MCP servers **MUST** conform to the requirements defined in\n[OAuth 2.1 Section 5 \"Resource Requests\"](https://datatracker.ietf.org/doc/html/draft-ietf-oauth-v2-1-12#section-5).\nSpecifically:\n\n1. MCP client **MUST** use the Authorization request header field defined in\n   [OAuth 2.1 Section 5.1.1](https://datatracker.ietf.org/doc/html/draft-ietf-oauth-v2-1-12#section-5.1.1):\n\n```\nAuthorization: Bearer <access-token>\n```\n\nNote that authorization **MUST** be included in every HTTP request from client to server,\neven if they are part of the same logical session.\n\n2. Access tokens **MUST NOT** be included in the URI query string\n\nExample request:\n\n```http\nGET /mcp HTTP/1.1\nHost: mcp.example.com\nAuthorization: Bearer eyJhbGciOiJIUzI1NiIs...\n```\n\n#### Token Handling\n\nMCP servers, acting in their role as an OAuth 2.1 resource server, **MUST** validate access tokens as described in\n[OAuth 2.1 Section 5.2](https://datatracker.ietf.org/doc/html/draft-ietf-oauth-v2-1-12#section-5.2).\nMCP servers **MUST** validate that access tokens were issued specifically for them as the intended audience,\naccording to [RFC 8707 Section 2](https://www.rfc-editor.org/rfc/rfc8707.html#section-2).\nIf validation fails, servers **MUST** respond according to\n[OAuth 2.1 Section 5.3](https://datatracker.ietf.org/doc/html/draft-ietf-oauth-v2-1-12#section-5.3)\nerror handling requirements. 
Invalid or expired tokens **MUST** receive an HTTP 401\nresponse.
To enable current and future adoption:\n\n* MCP clients **MUST** include the `resource` parameter in authorization and token requests as specified in the [Resource Parameter Implementation](#resource-parameter-implementation) section\n* MCP servers **MUST** validate that tokens presented to them were specifically issued for their use\n\nThe [Security Best Practices document](/specification/draft/basic/security_best_practices#token-passthrough)\noutlines why token audience validation is crucial and why token passthrough is explicitly forbidden.\n\n### Token Theft\n\nAttackers who obtain tokens stored by the client, or tokens cached or logged on the server can access protected resources with\nrequests that appear legitimate to resource servers.\n\nClients and servers **MUST** implement secure token storage and follow OAuth best practices,\nas outlined in [OAuth 2.1, Section 7.1](https://datatracker.ietf.org/doc/html/draft-ietf-oauth-v2-1-12#section-7.1).\n\nAuthorization servers **SHOULD** issue short-lived access tokens to reduce the impact of leaked tokens.\nFor public clients, authorization servers **MUST** rotate refresh tokens as described in [OAuth 2.1 Section 4.3.1 \"Refresh Token Grant\"](https://datatracker.ietf.org/doc/html/draft-ietf-oauth-v2-1-12#section-4.3.1).\n\n### Communication Security\n\nImplementations **MUST** follow [OAuth 2.1 Section 1.5 \"Communication Security\"](https://datatracker.ietf.org/doc/html/draft-ietf-oauth-v2-1-12#section-1.5).\n\nSpecifically:\n\n1. All authorization server endpoints **MUST** be served over HTTPS.\n2. 
All redirect URIs **MUST** be either `localhost` or use HTTPS.\n\n### Authorization Code Protection\n\nAn attacker who has gained access to an authorization code contained in an authorization response can try to redeem the authorization code for an access token or otherwise make use of the authorization code.\n(Further described in [OAuth 2.1 Section 7.5](https://datatracker.ietf.org/doc/html/draft-ietf-oauth-v2-1-12#section-7.5))\n\nTo mitigate this, MCP clients **MUST** implement PKCE according to [OAuth 2.1 Section 7.5.2](https://datatracker.ietf.org/doc/html/draft-ietf-oauth-v2-1-12#section-7.5.2).\nPKCE helps prevent authorization code interception and injection attacks by requiring clients to create a secret verifier-challenge pair, ensuring that only the original requestor can exchange an authorization code for tokens.\n\n### Open Redirection\n\nAn attacker may craft malicious redirect URIs to direct users to phishing sites.\n\nMCP clients **MUST** have redirect URIs registered with the authorization server.\n\nAuthorization servers **MUST** validate exact redirect URIs against pre-registered values to prevent redirection attacks.\n\nMCP clients **SHOULD** use and verify state parameters in the authorization code flow\nand discard any results that do not include or have a mismatch with the original state.\n\nAuthorization servers **MUST** take precautions to prevent redirecting user agents to untrusted URI's, following suggestions laid out in [OAuth 2.1 Section 7.12.2](https://datatracker.ietf.org/doc/html/draft-ietf-oauth-v2-1-12#section-7.12.2)\n\nAuthorization servers **SHOULD** only automatically redirect the user agent if it trusts the redirection URI. 
If the URI is not trusted, the authorization server MAY inform the user and rely on the user to make the correct decision.\n\n### Confused Deputy Problem\n\nAttackers can exploit MCP servers acting as intermediaries to third-party APIs, leading to [confused deputy vulnerabilities](/specification/2025-06-18/basic/security_best_practices#confused-deputy-problem).\nBy using stolen authorization codes, they can obtain access tokens without user consent.\n\nMCP proxy servers using static client IDs **MUST** obtain user consent for each dynamically\nregistered client before forwarding to third-party authorization servers (which may require additional consent).\n\n### Access Token Privilege Restriction\n\nAn attacker can gain unauthorized access or otherwise compromise a MCP server if the server accepts tokens issued for other resources.\n\nThis vulnerability has two critical dimensions:\n\n1. **Audience validation failures.** When an MCP server doesn't verify that tokens were specifically intended for it (for example, via the audience claim, as mentioned in [RFC9068](https://www.rfc-editor.org/rfc/rfc9068.html)), it may accept tokens originally issued for other services. This breaks a fundamental OAuth security boundary, allowing attackers to reuse legitimate tokens across different services than intended.\n2. **Token passthrough.** If the MCP server not only accepts tokens with incorrect audiences but also forwards these unmodified tokens to downstream services, it can potentially cause the [\"confused deputy\" problem](#confused-deputy-problem), where the downstream API may incorrectly trust the token as if it came from the MCP server or assume the token was validated by the upstream API. 
See the [Token Passthrough section](/specification/2025-06-18/basic/security_best_practices#token-passthrough) of the Security Best Practices guide for additional details.\n\nMCP servers **MUST** validate access tokens before processing the request, ensuring the access token is issued specifically for the MCP server, and take all necessary steps to ensure no data is returned to unauthorized parties.\n\nA MCP server **MUST** follow the guidelines in [OAuth 2.1 - Section 5.2](https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-12.html#section-5.2) to validate inbound tokens.\n\nMCP servers **MUST** only accept tokens specifically intended for themselves and **MUST** reject tokens that do not include them in the audience claim or otherwise verify that they are the intended recipient of the token. See the [Security Best Practices Token Passthrough section](/specification/2025-06-18/basic/security_best_practices#token-passthrough) for details.\n\nIf the MCP server makes requests to upstream APIs, it may act as an OAuth client to them. The access token used at the upstream API is a seperate token, issued by the upstream authorization server. The MCP server **MUST NOT** pass through the token it received from the MCP client.\n\nMCP clients **MUST** implement and use the `resource` parameter as defined in [RFC 8707 - Resource Indicators for OAuth 2.0](https://www.rfc-editor.org/rfc/rfc8707.html)\nto explicitly specify the target resource for which the token is being requested. This requirement aligns with the recommendation in\n[RFC 9728 Section 7.4](https://datatracker.ietf.org/doc/html/rfc9728#section-7.4). 
This ensures that access tokens are bound to their intended resources and\ncannot be misused across different services.\n\n\n# Overview\nSource: https://modelcontextprotocol.io/specification/2025-06-18/basic/index\n\n\n\n<div id=\"enable-section-numbers\" />\n\n<Info>**Protocol Revision**: 2025-06-18</Info>\n\nThe Model Context Protocol consists of several key components that work together:\n\n* **Base Protocol**: Core JSON-RPC message types\n* **Lifecycle Management**: Connection initialization, capability negotiation, and\n  session control\n* **Authorization**: Authentication and authorization framework for HTTP-based transports\n* **Server Features**: Resources, prompts, and tools exposed by servers\n* **Client Features**: Sampling and root directory lists provided by clients\n* **Utilities**: Cross-cutting concerns like logging and argument completion\n\nAll implementations **MUST** support the base protocol and lifecycle management\ncomponents. Other components **MAY** be implemented based on the specific needs of the\napplication.\n\nThese protocol layers establish clear separation of concerns while enabling rich\ninteractions between clients and servers. The modular design allows implementations to\nsupport exactly the features they need.\n\n## Messages\n\nAll messages between MCP clients and servers **MUST** follow the\n[JSON-RPC 2.0](https://www.jsonrpc.org/specification) specification. 
The protocol defines\nthese types of messages:\n\n### Requests\n\nRequests are sent from the client to the server or vice versa, to initiate an operation.\n\n```typescript\n{\n  jsonrpc: \"2.0\";\n  id: string | number;\n  method: string;\n  params?: {\n    [key: string]: unknown;\n  };\n}\n```\n\n* Requests **MUST** include a string or integer ID.\n* Unlike base JSON-RPC, the ID **MUST NOT** be `null`.\n* The request ID **MUST NOT** have been previously used by the requestor within the same\n  session.\n\n### Responses\n\nResponses are sent in reply to requests, containing the result or error of the operation.\n\n```typescript\n{\n  jsonrpc: \"2.0\";\n  id: string | number;\n  result?: {\n    [key: string]: unknown;\n  }\n  error?: {\n    code: number;\n    message: string;\n    data?: unknown;\n  }\n}\n```\n\n* Responses **MUST** include the same ID as the request they correspond to.\n* **Responses** are further sub-categorized as either **successful results** or\n  **errors**. Either a `result` or an `error` **MUST** be set. 
A response **MUST NOT**\n  set both.\n* Results **MAY** follow any JSON object structure, while errors **MUST** include an\n  error code and message at minimum.\n* Error codes **MUST** be integers.\n\n### Notifications\n\nNotifications are sent from the client to the server or vice versa, as a one-way message.\nThe receiver **MUST NOT** send a response.\n\n```typescript\n{\n  jsonrpc: \"2.0\";\n  method: string;\n  params?: {\n    [key: string]: unknown;\n  };\n}\n```\n\n* Notifications **MUST NOT** include an ID.\n\n## Auth\n\nMCP provides an [Authorization](/specification/2025-06-18/basic/authorization) framework for use with HTTP.\nImplementations using an HTTP-based transport **SHOULD** conform to this specification,\nwhereas implementations using STDIO transport **SHOULD NOT** follow this specification,\nand instead retrieve credentials from the environment.\n\nAdditionally, clients and servers **MAY** negotiate their own custom authentication and\nauthorization strategies.\n\nFor further discussions and contributions to the evolution of MCP’s auth mechanisms, join\nus in\n[GitHub Discussions](https://github.com/modelcontextprotocol/specification/discussions)\nto help shape the future of the protocol!\n\n## Schema\n\nThe full specification of the protocol is defined as a\n[TypeScript schema](https://github.com/modelcontextprotocol/specification/blob/main/schema/2025-06-18/schema.ts).\nThis is the source of truth for all protocol messages and structures.\n\nThere is also a\n[JSON Schema](https://github.com/modelcontextprotocol/specification/blob/main/schema/2025-06-18/schema.json),\nwhich is automatically generated from the TypeScript source of truth, for use with\nvarious automated tooling.\n\n### General fields\n\n#### `_meta`\n\nThe `_meta` property/parameter is reserved by MCP to allow clients and servers\nto attach additional metadata to their interactions.\n\nCertain key names are reserved by MCP for protocol-level metadata, as specified 
below;\nimplementations MUST NOT make assumptions about values at these keys.\n\nAdditionally, definitions in the [schema](https://github.com/modelcontextprotocol/specification/blob/main/schema/2025-06-18/schema.ts)\nmay reserve particular names for purpose-specific metadata, as declared in those definitions.\n\n**Key name format:** valid `_meta` key names have two segments: an optional **prefix**, and a **name**.\n\n**Prefix:**\n\n* If specified, MUST be a series of labels separated by dots (`.`), followed by a slash (`/`).\n  * Labels MUST start with a letter and end with a letter or digit; interior characters can be letters, digits, or hyphens (`-`).\n* Any prefix beginning with zero or more valid labels, followed by `modelcontextprotocol` or `mcp`, followed by any valid label,\n  is **reserved** for MCP use.\n  * For example: `modelcontextprotocol.io/`, `mcp.dev/`, `api.modelcontextprotocol.org/`, and `tools.mcp.com/` are all reserved.\n\n**Name:**\n\n* Unless empty, MUST begin and end with an alphanumeric character (`[a-z0-9A-Z]`).\n* MAY contain hyphens (`-`), underscores (`_`), dots (`.`), and alphanumerics in between.\n\n\n# Lifecycle\nSource: https://modelcontextprotocol.io/specification/2025-06-18/basic/lifecycle\n\n\n\n<div id=\"enable-section-numbers\" />\n\n<Info>**Protocol Revision**: 2025-06-18</Info>\n\nThe Model Context Protocol (MCP) defines a rigorous lifecycle for client-server\nconnections that ensures proper capability negotiation and state management.\n\n1. **Initialization**: Capability negotiation and protocol version agreement\n2. **Operation**: Normal protocol communication\n3. 
**Shutdown**: Graceful termination of the connection\n\n```mermaid\nsequenceDiagram\n    participant Client\n    participant Server\n\n    Note over Client,Server: Initialization Phase\n    activate Client\n    Client->>+Server: initialize request\n    Server-->>Client: initialize response\n    Client--)Server: initialized notification\n\n    Note over Client,Server: Operation Phase\n    rect rgb(200, 220, 250)\n        note over Client,Server: Normal protocol operations\n    end\n\n    Note over Client,Server: Shutdown\n    Client--)-Server: Disconnect\n    deactivate Server\n    Note over Client,Server: Connection closed\n```\n\n## Lifecycle Phases\n\n### Initialization\n\nThe initialization phase **MUST** be the first interaction between client and server.\nDuring this phase, the client and server:\n\n* Establish protocol version compatibility\n* Exchange and negotiate capabilities\n* Share implementation details\n\nThe client **MUST** initiate this phase by sending an `initialize` request containing:\n\n* Protocol version supported\n* Client capabilities\n* Client implementation information\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"method\": \"initialize\",\n  \"params\": {\n    \"protocolVersion\": \"2024-11-05\",\n    \"capabilities\": {\n      \"roots\": {\n        \"listChanged\": true\n      },\n      \"sampling\": {},\n      \"elicitation\": {}\n    },\n    \"clientInfo\": {\n      \"name\": \"ExampleClient\",\n      \"title\": \"Example Client Display Name\",\n      \"version\": \"1.0.0\"\n    }\n  }\n}\n```\n\nThe server **MUST** respond with its own capabilities and information:\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"result\": {\n    \"protocolVersion\": \"2024-11-05\",\n    \"capabilities\": {\n      \"logging\": {},\n      \"prompts\": {\n        \"listChanged\": true\n      },\n      \"resources\": {\n        \"subscribe\": true,\n        \"listChanged\": true\n      },\n      \"tools\": {\n        \"listChanged\": 
true\n      }\n    },\n    \"serverInfo\": {\n      \"name\": \"ExampleServer\",\n      \"title\": \"Example Server Display Name\",\n      \"version\": \"1.0.0\"\n    },\n    \"instructions\": \"Optional instructions for the client\"\n  }\n}\n```\n\nAfter successful initialization, the client **MUST** send an `initialized` notification\nto indicate it is ready to begin normal operations:\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"method\": \"notifications/initialized\"\n}\n```\n\n* The client **SHOULD NOT** send requests other than\n  [pings](/specification/2025-06-18/basic/utilities/ping) before the server has responded to the\n  `initialize` request.\n* The server **SHOULD NOT** send requests other than\n  [pings](/specification/2025-06-18/basic/utilities/ping) and\n  [logging](/specification/2025-06-18/server/utilities/logging) before receiving the `initialized`\n  notification.\n\n#### Version Negotiation\n\nIn the `initialize` request, the client **MUST** send a protocol version it supports.\nThis **SHOULD** be the *latest* version supported by the client.\n\nIf the server supports the requested protocol version, it **MUST** respond with the same\nversion. Otherwise, the server **MUST** respond with another protocol version it\nsupports. 
This **SHOULD** be the *latest* version supported by the server.\n\nIf the client does not support the version in the server's response, it **SHOULD**\ndisconnect.\n\n<Note>\n  If using HTTP, the client **MUST** include the `MCP-Protocol-Version: <protocol-version>` HTTP header on all subsequent requests to the MCP\n  server.\n  For details, see [the Protocol Version Header section in Transports](/specification/2025-06-18/basic/transports#protocol-version-header).\n</Note>\n\n#### Capability Negotiation\n\nClient and server capabilities establish which optional protocol features will be\navailable during the session.\n\nKey capabilities include:\n\n| Category | Capability     | Description                                                                               |\n| -------- | -------------- | ----------------------------------------------------------------------------------------- |\n| Client   | `roots`        | Ability to provide filesystem [roots](/specification/2025-06-18/client/roots)             |\n| Client   | `sampling`     | Support for LLM [sampling](/specification/2025-06-18/client/sampling) requests            |\n| Client   | `elicitation`  | Support for server [elicitation](/specification/2025-06-18/client/elicitation) requests   |\n| Client   | `experimental` | Describes support for non-standard experimental features                                  |\n| Server   | `prompts`      | Offers [prompt templates](/specification/2025-06-18/server/prompts)                       |\n| Server   | `resources`    | Provides readable [resources](/specification/2025-06-18/server/resources)                 |\n| Server   | `tools`        | Exposes callable [tools](/specification/2025-06-18/server/tools)                          |\n| Server   | `logging`      | Emits structured [log messages](/specification/2025-06-18/server/utilities/logging)       |\n| Server   | `completions`  | Supports argument 
[autocompletion](/specification/2025-06-18/server/utilities/completion) |\n| Server   | `experimental` | Describes support for non-standard experimental features                                  |\n\nCapability objects can describe sub-capabilities like:\n\n* `listChanged`: Support for list change notifications (for prompts, resources, and\n  tools)\n* `subscribe`: Support for subscribing to individual items' changes (resources only)\n\n### Operation\n\nDuring the operation phase, the client and server exchange messages according to the\nnegotiated capabilities.\n\nBoth parties **MUST**:\n\n* Respect the negotiated protocol version\n* Only use capabilities that were successfully negotiated\n\n### Shutdown\n\nDuring the shutdown phase, one side (usually the client) cleanly terminates the protocol\nconnection. No specific shutdown messages are defined—instead, the underlying transport\nmechanism should be used to signal connection termination:\n\n#### stdio\n\nFor the stdio [transport](/specification/2025-06-18/basic/transports), the client **SHOULD** initiate\nshutdown by:\n\n1. First, closing the input stream to the child process (the server)\n2. Waiting for the server to exit, or sending `SIGTERM` if the server does not exit\n   within a reasonable time\n3. Sending `SIGKILL` if the server does not exit within a reasonable time after `SIGTERM`\n\nThe server **MAY** initiate shutdown by closing its output stream to the client and\nexiting.\n\n#### HTTP\n\nFor HTTP [transports](/specification/2025-06-18/basic/transports), shutdown is indicated by closing the\nassociated HTTP connection(s).\n\n## Timeouts\n\nImplementations **SHOULD** establish timeouts for all sent requests, to prevent hung\nconnections and resource exhaustion. 
When the request has not received a success or error\nresponse within the timeout period, the sender **SHOULD** issue a [cancellation\nnotification](/specification/2025-06-18/basic/utilities/cancellation) for that request and stop waiting for\na response.\n\nSDKs and other middleware **SHOULD** allow these timeouts to be configured on a\nper-request basis.\n\nImplementations **MAY** choose to reset the timeout clock when receiving a [progress\nnotification](/specification/2025-06-18/basic/utilities/progress) corresponding to the request, as this\nimplies that work is actually happening. However, implementations **SHOULD** always\nenforce a maximum timeout, regardless of progress notifications, to limit the impact of a\nmisbehaving client or server.\n\n## Error Handling\n\nImplementations **SHOULD** be prepared to handle these error cases:\n\n* Protocol version mismatch\n* Failure to negotiate required capabilities\n* Request [timeouts](#timeouts)\n\nExample initialization error:\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"error\": {\n    \"code\": -32602,\n    \"message\": \"Unsupported protocol version\",\n    \"data\": {\n      \"supported\": [\"2024-11-05\"],\n      \"requested\": \"1.0.0\"\n    }\n  }\n}\n```\n\n\n# Security Best Practices\nSource: https://modelcontextprotocol.io/specification/2025-06-18/basic/security_best_practices\n\n\n\n<div id=\"enable-section-numbers\" />\n\n## Introduction\n\n### Purpose and Scope\n\nThis document provides security considerations for the Model Context Protocol (MCP), complementing the MCP Authorization specification. This document identifies security risks, attack vectors, and best practices specific to MCP implementations.\n\nThe primary audience for this document includes developers implementing MCP authorization flows, MCP server operators, and security professionals evaluating MCP-based systems. 
This document should be read alongside the MCP Authorization specification and [OAuth 2.0 security best practices](https://datatracker.ietf.org/doc/html/rfc9700).\n\n## Attacks and Mitigations\n\nThis section gives a detailed description of attacks on MCP implementations, along with potential countermeasures.\n\n### Confused Deputy Problem\n\nAttackers can exploit MCP servers proxying other resource servers, creating \"[confused deputy](https://en.wikipedia.org/wiki/Confused_deputy_problem)\" vulnerabilities.\n\n#### Terminology\n\n**MCP Proxy Server**\n: An MCP server that connects MCP clients to third-party APIs, offering MCP features while delegating operations and acting as a single OAuth client to the third-party API server.\n\n**Third-Party Authorization Server**\n: Authorization server that protects the third-party API. It may lack dynamic client registration support, requiring MCP proxy to use a static client ID for all requests.\n\n**Third-Party API**\n: The protected resource server that provides the actual API functionality. Access to this\nAPI requires tokens issued by the third-party authorization server.\n\n**Static Client ID**\n: A fixed OAuth 2.0 client identifier used by the MCP proxy server when communicating with\nthe third-party authorization server. This Client ID refers to the MCP server acting as a client\nto the Third-Party API. 
It is the same value for all MCP server to Third-Party API interactions regardless of\nwhich MCP client initiated the request.\n\n#### Architecture and Attack Flows\n\n##### Normal OAuth proxy usage (preserves user consent)\n\n```mermaid\nsequenceDiagram\n    participant UA as User-Agent (Browser)\n    participant MC as MCP Client\n    participant M as MCP Proxy Server\n    participant TAS as Third-Party Authorization Server\n\n    Note over UA,M: Initial Auth flow completed\n\n    Note over UA,TAS: Step 1: Legitimate user consent for Third Party Server\n\n    M->>UA: Redirect to third party authorization server\n    UA->>TAS: Authorization request (client_id: mcp-proxy)\n    TAS->>UA: Authorization consent screen\n    Note over UA: Review consent screen\n    UA->>TAS: Approve\n    TAS->>UA: Set consent cookie for client ID: mcp-proxy\n    TAS->>UA: 3P Authorization code + redirect to mcp-proxy-server.com\n    UA->>M: 3P Authorization code\n    Note over M,TAS: Exchange 3P code for 3P token\n    Note over M: Generate MCP authorization code\n    M->>UA: Redirect to MCP Client with MCP authorization code\n\n    Note over M,UA: Exchange code for token, etc.\n```\n\n##### Malicious OAuth proxy usage (skips user consent)\n\n```mermaid\nsequenceDiagram\n    participant UA as User-Agent (Browser)\n    participant M as MCP Proxy Server\n    participant TAS as Third-Party Authorization Server\n    participant A as Attacker\n\n\n    Note over UA,A: Step 2: Attack (leveraging existing cookie, skipping consent)\n    A->>M: Dynamically register malicious client, redirect_uri: attacker.com\n    A->>UA: Sends malicious link\n    UA->>TAS: Authorization request (client_id: mcp-proxy) + consent cookie\n    rect rgba(255, 17, 0, 0.67)\n    TAS->>TAS: Cookie present, consent skipped\n    end\n\n   TAS->>UA: 3P Authorization code + redirect to mcp-proxy-server.com\n   UA->>M: 3P Authorization code\n   Note over M,TAS: Exchange 3P code for 3P token\n   Note over M: Generate MCP 
authorization code\n   M->>UA: Redirect to attacker.com with MCP Authorization code\n   UA->>A: MCP Authorization code delivered to attacker.com\n   Note over M,A: Attacker exchanges MCP code for MCP token\n   A->>M: Attacker impersonates user to MCP server\n```\n\n#### Attack Description\n\nWhen an MCP proxy server uses a static client ID to authenticate with a third-party\nauthorization server that does not support dynamic client registration, the following\nattack becomes possible:\n\n1. A user authenticates normally through the MCP proxy server to access the third-party API\n2. During this flow, the third-party authorization server sets a cookie on the user agent\n   indicating consent for the static client ID\n3. An attacker later sends the user a malicious link containing a crafted authorization request which contains a malicious redirect URI along with a new dynamically registered client ID\n4. When the user clicks the link, their browser still has the consent cookie from the previous legitimate request\n5. The third-party authorization server detects the cookie and skips the consent screen\n6. The MCP authorization code is redirected to the attacker's server (specified in the crafted redirect\\_uri during dynamic client registration)\n7. The attacker exchanges the stolen authorization code for access tokens for the MCP server without the user's explicit approval\n8. 
Attacker now has access to the third-party API as the compromised user\n\n#### Mitigation\n\nMCP proxy servers using static client IDs **MUST** obtain user consent for each dynamically\nregistered client before forwarding to third-party authorization servers (which may require additional consent).\n\n### Token Passthrough\n\n\"Token passthrough\" is an anti-pattern where an MCP server accepts tokens from an MCP client without validating that the tokens were properly issued *to the MCP server* and \"passing them through\" to the downstream API.\n\n#### Risks\n\nToken passthrough is explicitly forbidden in the [authorization specification](/specification/2025-06-18/basic/authorization) as it introduces a number of security risks, that include:\n\n* **Security Control Circumvention**\n  * The MCP Server or downstream APIs might implement important security controls like rate limiting, request validation, or traffic monitoring, that depend on the token audience or other credential constraints. 
If clients can obtain and use tokens directly with the downstream APIs without the MCP server validating them properly or ensuring that the tokens are issued for the right service, they bypass these controls.\n* **Accountability and Audit Trail Issues**\n  * The MCP Server will be unable to identify or distinguish between MCP Clients when clients are calling with an upstream-issued access token which may be opaque to the MCP Server.\n  * The downstream Resource Server’s logs may show requests that appear to come from a different source with a different identity, rather than the MCP server that is actually forwarding the tokens.\n  * Both factors make incident investigation, controls, and auditing more difficult.\n  * If the MCP Server passes tokens without validating their claims (e.g., roles, privileges, or audience) or other metadata, a malicious actor in possession of a stolen token can use the server as a proxy for data exfiltration.\n* **Trust Boundary Issues**\n  * The downstream Resource Server grants trust to specific entities. This trust might include assumptions about origin or client behavior patterns. Breaking this trust boundary could lead to unexpected issues.\n  * If the token is accepted by multiple services without proper validation, an attacker compromising one service can use the token to access other connected services.\n* **Future Compatibility Risk**\n  * Even if an MCP Server starts as a \"pure proxy\" today, it might need to add security controls later. 
Starting with proper token audience separation makes it easier to evolve the security model.\n\n#### Mitigation\n\nMCP servers **MUST NOT** accept any tokens that were not explicitly issued for the MCP server.\n\n### Session Hijacking\n\nSession hijacking is an attack vector where a client is provided a session ID by the server, and an unauthorized party is able to obtain and use that same session ID to impersonate the original client and perform unauthorized actions on their behalf.\n\n#### Session Hijack Prompt Injection\n\n```mermaid\nsequenceDiagram\n    participant Client\n    participant ServerA\n    participant Queue\n    participant ServerB\n    participant Attacker\n\n    Client->>ServerA: Initialize (connect to streamable HTTP server)\n    ServerA-->>Client: Respond with session ID\n\n    Attacker->>ServerB: Access/guess session ID\n    Note right of Attacker: Attacker knows/guesses session ID\n\n    Attacker->>ServerB: Trigger event (malicious payload, using session ID)\n    ServerB->>Queue: Enqueue event (keyed by session ID)\n\n    ServerA->>Queue: Poll for events (using session ID)\n    Queue-->>ServerA: Event data (malicious payload)\n\n    ServerA-->>Client: Async response (malicious payload)\n    Client->>Client: Acts based on malicious payload\n```\n\n#### Session Hijack Impersonation\n\n```mermaid\nsequenceDiagram\n    participant Client\n    participant Server\n    participant Attacker\n\n    Client->>Server: Initialize (login/authenticate)\n    Server-->>Client: Respond with session ID (persistent session created)\n\n    Attacker->>Server: Access/guess session ID\n    Note right of Attacker: Attacker knows/guesses session ID\n\n    Attacker->>Server: Make API call (using session ID, no re-auth)\n    Server-->>Attacker: Respond as if Attacker is Client (session hijack)\n```\n\n#### Attack Description\n\nWhen you have multiple stateful HTTP servers that handle MCP requests, the following attack vectors are possible:\n\n**Session Hijack Prompt 
Injection**\n\n1. The client connects to **Server A** and receives a session ID.\n\n2. The attacker obtains an existing session ID and sends a malicious event to **Server B** with said session ID.\n\n   * When a server supports [redelivery/resumable streams](/specification/2025-06-18/basic/transports#resumability-and-redelivery), deliberately terminating the request before receiving the response could lead to it being resumed by the original client via the GET request for server sent events.\n   * If a particular server initiates server sent events as a consequence of a tool call such as a `notifications/tools/list_changed`, where it is possible to affect the tools that are offered by the server, a client could end up with tools that they were not aware were enabled.\n\n3. **Server B** enqueues the event (associated with session ID) into a shared queue.\n\n4. **Server A** polls the queue for events using the session ID and retrieves the malicious payload.\n\n5. **Server A** sends the malicious payload to the client as an asynchronous or resumed response.\n\n6. The client receives and acts on the malicious payload, leading to potential compromise.\n\n**Session Hijack Impersonation**\n\n1. The MCP client authenticates with the MCP server, creating a persistent session ID.\n2. The attacker obtains the session ID.\n3. The attacker makes calls to the MCP server using the session ID.\n4. MCP server does not check for additional authorization and treats the attacker as a legitimate user, allowing unauthorized access or actions.\n\n#### Mitigation\n\nTo prevent session hijacking and event injection attacks, the following mitigations should be implemented:\n\nMCP servers that implement authorization **MUST** verify all inbound requests.\nMCP Servers **MUST NOT** use sessions for authentication.\n\nMCP servers **MUST** use secure, non-deterministic session IDs.\nGenerated session IDs (e.g., UUIDs) **SHOULD** use secure random number generators. 
Avoid predictable or sequential session identifiers that could be guessed by an attacker. Rotating or expiring session IDs can also reduce the risk.\n\nMCP servers **SHOULD** bind session IDs to user-specific information.\nWhen storing or transmitting session-related data (e.g., in a queue), combine the session ID with information unique to the authorized user, such as their internal user ID. Use a key format like `<user_id>:<session_id>`. This ensures that even if an attacker guesses a session ID, they cannot impersonate another user as the user ID is derived from the user token and not provided by the client.\n\nMCP servers can optionally leverage additional unique identifiers.\n\n\n# Transports\nSource: https://modelcontextprotocol.io/specification/2025-06-18/basic/transports\n\n\n\n<div id=\"enable-section-numbers\" />\n\n<Info>**Protocol Revision**: 2025-06-18</Info>\n\nMCP uses JSON-RPC to encode messages. JSON-RPC messages **MUST** be UTF-8 encoded.\n\nThe protocol currently defines two standard transport mechanisms for client-server\ncommunication:\n\n1. [stdio](#stdio), communication over standard in and standard out\n2. [Streamable HTTP](#streamable-http)\n\nClients **SHOULD** support stdio whenever possible.\n\nIt is also possible for clients and servers to implement\n[custom transports](#custom-transports) in a pluggable fashion.\n\n## stdio\n\nIn the **stdio** transport:\n\n* The client launches the MCP server as a subprocess.\n* The server reads JSON-RPC messages from its standard input (`stdin`) and sends messages\n  to its standard output (`stdout`).\n* Messages are individual JSON-RPC requests, notifications, or responses.\n* Messages are delimited by newlines, and **MUST NOT** contain embedded newlines.\n* The server **MAY** write UTF-8 strings to its standard error (`stderr`) for logging\n  purposes. 
Clients **MAY** capture, forward, or ignore this logging.\n* The server **MUST NOT** write anything to its `stdout` that is not a valid MCP message.\n* The client **MUST NOT** write anything to the server's `stdin` that is not a valid MCP\n  message.\n\n```mermaid\nsequenceDiagram\n    participant Client\n    participant Server Process\n\n    Client->>+Server Process: Launch subprocess\n    loop Message Exchange\n        Client->>Server Process: Write to stdin\n        Server Process->>Client: Write to stdout\n        Server Process--)Client: Optional logs on stderr\n    end\n    Client->>Server Process: Close stdin, terminate subprocess\n    deactivate Server Process\n```\n\n## Streamable HTTP\n\n<Info>\n  This replaces the [HTTP+SSE\n  transport](/specification/2024-11-05/basic/transports#http-with-sse) from\n  protocol version 2024-11-05. See the [backwards compatibility](#backwards-compatibility)\n  guide below.\n</Info>\n\nIn the **Streamable HTTP** transport, the server operates as an independent process that\ncan handle multiple client connections. This transport uses HTTP POST and GET requests.\nServer can optionally make use of\n[Server-Sent Events](https://en.wikipedia.org/wiki/Server-sent_events) (SSE) to stream\nmultiple server messages. This permits basic MCP servers, as well as more feature-rich\nservers supporting streaming and server-to-client notifications and requests.\n\nThe server **MUST** provide a single HTTP endpoint path (hereafter referred to as the\n**MCP endpoint**) that supports both POST and GET methods. For example, this could be a\nURL like `https://example.com/mcp`.\n\n#### Security Warning\n\nWhen implementing Streamable HTTP transport:\n\n1. Servers **MUST** validate the `Origin` header on all incoming connections to prevent DNS rebinding attacks\n2. When running locally, servers **SHOULD** bind only to localhost (127.0.0.1) rather than all network interfaces (0.0.0.0)\n3. 
Servers **SHOULD** implement proper authentication for all connections\n\nWithout these protections, attackers could use DNS rebinding to interact with local MCP servers from remote websites.\n\n### Sending Messages to the Server\n\nEvery JSON-RPC message sent from the client **MUST** be a new HTTP POST request to the\nMCP endpoint.\n\n1. The client **MUST** use HTTP POST to send JSON-RPC messages to the MCP endpoint.\n2. The client **MUST** include an `Accept` header, listing both `application/json` and\n   `text/event-stream` as supported content types.\n3. The body of the POST request **MUST** be a single JSON-RPC *request*, *notification*, or *response*.\n4. If the input is a JSON-RPC *response* or *notification*:\n   * If the server accepts the input, the server **MUST** return HTTP status code 202\n     Accepted with no body.\n   * If the server cannot accept the input, it **MUST** return an HTTP error status code\n     (e.g., 400 Bad Request). The HTTP response body **MAY** comprise a JSON-RPC *error\n     response* that has no `id`.\n5. If the input is a JSON-RPC *request*, the server **MUST** either\n   return `Content-Type: text/event-stream`, to initiate an SSE stream, or\n   `Content-Type: application/json`, to return one JSON object. The client **MUST**\n   support both these cases.\n6. If the server initiates an SSE stream:\n   * The SSE stream **SHOULD** eventually include JSON-RPC *response* for the\n     JSON-RPC *request* sent in the POST body.\n   * The server **MAY** send JSON-RPC *requests* and *notifications* before sending the\n     JSON-RPC *response*. 
These messages **SHOULD** relate to the originating client\n     *request*.\n   * The server **SHOULD NOT** close the SSE stream before sending the JSON-RPC *response*\n     for the received JSON-RPC *request*, unless the [session](#session-management)\n     expires.\n   * After the JSON-RPC *response* has been sent, the server **SHOULD** close the SSE\n     stream.\n   * Disconnection **MAY** occur at any time (e.g., due to network conditions).\n     Therefore:\n     * Disconnection **SHOULD NOT** be interpreted as the client cancelling its request.\n     * To cancel, the client **SHOULD** explicitly send an MCP `CancelledNotification`.\n     * To avoid message loss due to disconnection, the server **MAY** make the stream\n       [resumable](#resumability-and-redelivery).\n\n### Listening for Messages from the Server\n\n1. The client **MAY** issue an HTTP GET to the MCP endpoint. This can be used to open an\n   SSE stream, allowing the server to communicate to the client, without the client first\n   sending data via HTTP POST.\n2. The client **MUST** include an `Accept` header, listing `text/event-stream` as a\n   supported content type.\n3. The server **MUST** either return `Content-Type: text/event-stream` in response to\n   this HTTP GET, or else return HTTP 405 Method Not Allowed, indicating that the server\n   does not offer an SSE stream at this endpoint.\n4. If the server initiates an SSE stream:\n   * The server **MAY** send JSON-RPC *requests* and *notifications* on the stream.\n   * These messages **SHOULD** be unrelated to any concurrently-running JSON-RPC\n     *request* from the client.\n   * The server **MUST NOT** send a JSON-RPC *response* on the stream **unless**\n     [resuming](#resumability-and-redelivery) a stream associated with a previous client\n     request.\n   * The server **MAY** close the SSE stream at any time.\n   * The client **MAY** close the SSE stream at any time.\n\n### Multiple Connections\n\n1. 
The client **MAY** remain connected to multiple SSE streams simultaneously.\n2. The server **MUST** send each of its JSON-RPC messages on only one of the connected\n   streams; that is, it **MUST NOT** broadcast the same message across multiple streams.\n   * The risk of message loss **MAY** be mitigated by making the stream\n     [resumable](#resumability-and-redelivery).\n\n### Resumability and Redelivery\n\nTo support resuming broken connections, and redelivering messages that might otherwise be\nlost:\n\n1. Servers **MAY** attach an `id` field to their SSE events, as described in the\n   [SSE standard](https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation).\n   * If present, the ID **MUST** be globally unique across all streams within that\n     [session](#session-management)—or all streams with that specific client, if session\n     management is not in use.\n2. If the client wishes to resume after a broken connection, it **SHOULD** issue an HTTP\n   GET to the MCP endpoint, and include the\n   [`Last-Event-ID`](https://html.spec.whatwg.org/multipage/server-sent-events.html#the-last-event-id-header)\n   header to indicate the last event ID it received.\n   * The server **MAY** use this header to replay messages that would have been sent\n     after the last event ID, *on the stream that was disconnected*, and to resume the\n     stream from that point.\n   * The server **MUST NOT** replay messages that would have been delivered on a\n     different stream.\n\nIn other words, these event IDs should be assigned by servers on a *per-stream* basis, to\nact as a cursor within that particular stream.\n\n### Session Management\n\nAn MCP \"session\" consists of logically related interactions between a client and a\nserver, beginning with the [initialization phase](/specification/2025-06-18/basic/lifecycle). To support\nservers which want to establish stateful sessions:\n\n1. 
A server using the Streamable HTTP transport **MAY** assign a session ID at\n   initialization time, by including it in an `Mcp-Session-Id` header on the HTTP\n   response containing the `InitializeResult`.\n   * The session ID **SHOULD** be globally unique and cryptographically secure (e.g., a\n     securely generated UUID, a JWT, or a cryptographic hash).\n   * The session ID **MUST** only contain visible ASCII characters (ranging from 0x21 to\n     0x7E).\n2. If an `Mcp-Session-Id` is returned by the server during initialization, clients using\n   the Streamable HTTP transport **MUST** include it in the `Mcp-Session-Id` header on\n   all of their subsequent HTTP requests.\n   * Servers that require a session ID **SHOULD** respond to requests without an\n     `Mcp-Session-Id` header (other than initialization) with HTTP 400 Bad Request.\n3. The server **MAY** terminate the session at any time, after which it **MUST** respond\n   to requests containing that session ID with HTTP 404 Not Found.\n4. When a client receives HTTP 404 in response to a request containing an\n   `Mcp-Session-Id`, it **MUST** start a new session by sending a new `InitializeRequest`\n   without a session ID attached.\n5. 
Clients that no longer need a particular session (e.g., because the user is leaving\n   the client application) **SHOULD** send an HTTP DELETE to the MCP endpoint with the\n   `Mcp-Session-Id` header, to explicitly terminate the session.\n   * The server **MAY** respond to this request with HTTP 405 Method Not Allowed,\n     indicating that the server does not allow clients to terminate sessions.\n\n### Sequence Diagram\n\n```mermaid\nsequenceDiagram\n    participant Client\n    participant Server\n\n    note over Client, Server: initialization\n\n    Client->>+Server: POST InitializeRequest\n    Server->>-Client: InitializeResponse<br>Mcp-Session-Id: 1868a90c...\n\n    Client->>+Server: POST InitializedNotification<br>Mcp-Session-Id: 1868a90c...\n    Server->>-Client: 202 Accepted\n\n    note over Client, Server: client requests\n    Client->>+Server: POST ... request ...<br>Mcp-Session-Id: 1868a90c...\n\n    alt single HTTP response\n      Server->>Client: ... response ...\n    else server opens SSE stream\n      loop while connection remains open\n          Server-)Client: ... SSE messages from server ...\n      end\n      Server-)Client: SSE event: ... response ...\n    end\n    deactivate Server\n\n    note over Client, Server: client notifications/responses\n    Client->>+Server: POST ... notification/response ...<br>Mcp-Session-Id: 1868a90c...\n    Server->>-Client: 202 Accepted\n\n    note over Client, Server: server requests\n    Client->>+Server: GET<br>Mcp-Session-Id: 1868a90c...\n    loop while connection remains open\n        Server-)Client: ... 
SSE messages from server ...\n    end\n    deactivate Server\n\n```\n\n### Protocol Version Header\n\nIf using HTTP, the client **MUST** include the `MCP-Protocol-Version: <protocol-version>` HTTP header on all subsequent requests to the MCP\nserver, allowing the MCP server to respond based on the MCP protocol version.\n\nFor example: `MCP-Protocol-Version: 2025-06-18`\n\nThe protocol version sent by the client **SHOULD** be the one [negotiated during\ninitialization](/specification/2025-06-18/basic/lifecycle#version-negotiation).\n\nFor backwards compatibility, if the server does *not* receive an `MCP-Protocol-Version`\nheader, and has no other way to identify the version - for example, by relying on the\nprotocol version negotiated during initialization - the server **SHOULD** assume protocol\nversion `2025-03-26`.\n\nIf the server receives a request with an invalid or unsupported\n`MCP-Protocol-Version`, it **MUST** respond with `400 Bad Request`.\n\n### Backwards Compatibility\n\nClients and servers can maintain backwards compatibility with the deprecated [HTTP+SSE\ntransport](/specification/2024-11-05/basic/transports#http-with-sse) (from\nprotocol version 2024-11-05) as follows:\n\n**Servers** wanting to support older clients should:\n\n* Continue to host both the SSE and POST endpoints of the old transport, alongside the\n  new \"MCP endpoint\" defined for the Streamable HTTP transport.\n  * It is also possible to combine the old POST endpoint and the new MCP endpoint, but\n    this may introduce unneeded complexity.\n\n**Clients** wanting to support older servers should:\n\n1. Accept an MCP server URL from the user, which may point to either a server using the\n   old transport or the new transport.\n2. 
Attempt to POST an `InitializeRequest` to the server URL, with an `Accept` header as\n   defined above:\n   * If it succeeds, the client can assume this is a server supporting the new Streamable\n     HTTP transport.\n   * If it fails with an HTTP 4xx status code (e.g., 405 Method Not Allowed or 404 Not\n     Found):\n     * Issue a GET request to the server URL, expecting that this will open an SSE stream\n       and return an `endpoint` event as the first event.\n     * When the `endpoint` event arrives, the client can assume this is a server running\n       the old HTTP+SSE transport, and should use that transport for all subsequent\n       communication.\n\n## Custom Transports\n\nClients and servers **MAY** implement additional custom transport mechanisms to suit\ntheir specific needs. The protocol is transport-agnostic and can be implemented over any\ncommunication channel that supports bidirectional message exchange.\n\nImplementers who choose to support custom transports **MUST** ensure they preserve the\nJSON-RPC message format and lifecycle requirements defined by MCP. Custom transports\n**SHOULD** document their specific connection establishment and message exchange patterns\nto aid interoperability.\n\n\n# Cancellation\nSource: https://modelcontextprotocol.io/specification/2025-06-18/basic/utilities/cancellation\n\n\n\n<div id=\"enable-section-numbers\" />\n\n<Info>**Protocol Revision**: 2025-06-18</Info>\n\nThe Model Context Protocol (MCP) supports optional cancellation of in-progress requests\nthrough notification messages. 
Either side can send a cancellation notification to\nindicate that a previously-issued request should be terminated.\n\n## Cancellation Flow\n\nWhen a party wants to cancel an in-progress request, it sends a `notifications/cancelled`\nnotification containing:\n\n* The ID of the request to cancel\n* An optional reason string that can be logged or displayed\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"method\": \"notifications/cancelled\",\n  \"params\": {\n    \"requestId\": \"123\",\n    \"reason\": \"User requested cancellation\"\n  }\n}\n```\n\n## Behavior Requirements\n\n1. Cancellation notifications **MUST** only reference requests that:\n   * Were previously issued in the same direction\n   * Are believed to still be in-progress\n2. The `initialize` request **MUST NOT** be cancelled by clients\n3. Receivers of cancellation notifications **SHOULD**:\n   * Stop processing the cancelled request\n   * Free associated resources\n   * Not send a response for the cancelled request\n4. Receivers **MAY** ignore cancellation notifications if:\n   * The referenced request is unknown\n   * Processing has already completed\n   * The request cannot be cancelled\n5. 
The sender of the cancellation notification **SHOULD** ignore any response to the\n   request that arrives afterward\n\n## Timing Considerations\n\nDue to network latency, cancellation notifications may arrive after request processing\nhas completed, and potentially after a response has already been sent.\n\nBoth parties **MUST** handle these race conditions gracefully:\n\n```mermaid\nsequenceDiagram\n   participant Client\n   participant Server\n\n   Client->>Server: Request (ID: 123)\n   Note over Server: Processing starts\n   Client--)Server: notifications/cancelled (ID: 123)\n   alt\n      Note over Server: Processing may have<br/>completed before<br/>cancellation arrives\n   else If not completed\n      Note over Server: Stop processing\n   end\n```\n\n## Implementation Notes\n\n* Both parties **SHOULD** log cancellation reasons for debugging\n* Application UIs **SHOULD** indicate when cancellation is requested\n\n## Error Handling\n\nInvalid cancellation notifications **SHOULD** be ignored:\n\n* Unknown request IDs\n* Already completed requests\n* Malformed notifications\n\nThis maintains the \"fire and forget\" nature of notifications while allowing for race\nconditions in asynchronous communication.\n\n\n# Ping\nSource: https://modelcontextprotocol.io/specification/2025-06-18/basic/utilities/ping\n\n\n\n<div id=\"enable-section-numbers\" />\n\n<Info>**Protocol Revision**: 2025-06-18</Info>\n\nThe Model Context Protocol includes an optional ping mechanism that allows either party\nto verify that their counterpart is still responsive and the connection is alive.\n\n## Overview\n\nThe ping functionality is implemented through a simple request/response pattern. Either\nthe client or server can initiate a ping by sending a `ping` request.\n\n## Message Format\n\nA ping request is a standard JSON-RPC request with no parameters:\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": \"123\",\n  \"method\": \"ping\"\n}\n```\n\n## Behavior Requirements\n\n1. 
The receiver **MUST** respond promptly with an empty response:\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": \"123\",\n  \"result\": {}\n}\n```\n\n2. If no response is received within a reasonable timeout period, the sender **MAY**:\n   * Consider the connection stale\n   * Terminate the connection\n   * Attempt reconnection procedures\n\n## Usage Patterns\n\n```mermaid\nsequenceDiagram\n    participant Sender\n    participant Receiver\n\n    Sender->>Receiver: ping request\n    Receiver->>Sender: empty response\n```\n\n## Implementation Considerations\n\n* Implementations **SHOULD** periodically issue pings to detect connection health\n* The frequency of pings **SHOULD** be configurable\n* Timeouts **SHOULD** be appropriate for the network environment\n* Excessive pinging **SHOULD** be avoided to reduce network overhead\n\n## Error Handling\n\n* Timeouts **SHOULD** be treated as connection failures\n* Multiple failed pings **MAY** trigger connection reset\n* Implementations **SHOULD** log ping failures for diagnostics\n\n\n# Progress\nSource: https://modelcontextprotocol.io/specification/2025-06-18/basic/utilities/progress\n\n\n\n<div id=\"enable-section-numbers\" />\n\n<Info>**Protocol Revision**: 2025-06-18</Info>\n\nThe Model Context Protocol (MCP) supports optional progress tracking for long-running\noperations through notification messages. 
Either side can send progress notifications to\nprovide updates about operation status.\n\n## Progress Flow\n\nWhen a party wants to *receive* progress updates for a request, it includes a\n`progressToken` in the request metadata.\n\n* Progress tokens **MUST** be a string or integer value\n* Progress tokens can be chosen by the sender using any means, but **MUST** be unique\n  across all active requests.\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"method\": \"some_method\",\n  \"params\": {\n    \"_meta\": {\n      \"progressToken\": \"abc123\"\n    }\n  }\n}\n```\n\nThe receiver **MAY** then send progress notifications containing:\n\n* The original progress token\n* The current progress value so far\n* An optional \"total\" value\n* An optional \"message\" value\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"method\": \"notifications/progress\",\n  \"params\": {\n    \"progressToken\": \"abc123\",\n    \"progress\": 50,\n    \"total\": 100,\n    \"message\": \"Reticulating splines...\"\n  }\n}\n```\n\n* The `progress` value **MUST** increase with each notification, even if the total is\n  unknown.\n* The `progress` and the `total` values **MAY** be floating point.\n* The `message` field **SHOULD** provide relevant human readable progress information.\n\n## Behavior Requirements\n\n1. Progress notifications **MUST** only reference tokens that:\n\n   * Were provided in an active request\n   * Are associated with an in-progress operation\n\n2. 
Receivers of progress requests **MAY**:\n   * Choose not to send any progress notifications\n   * Send notifications at whatever frequency they deem appropriate\n   * Omit the total value if unknown\n\n```mermaid\nsequenceDiagram\n    participant Sender\n    participant Receiver\n\n    Note over Sender,Receiver: Request with progress token\n    Sender->>Receiver: Method request with progressToken\n\n    Note over Sender,Receiver: Progress updates\n    loop Progress Updates\n        Receiver-->>Sender: Progress notification (0.2/1.0)\n        Receiver-->>Sender: Progress notification (0.6/1.0)\n        Receiver-->>Sender: Progress notification (1.0/1.0)\n    end\n\n    Note over Sender,Receiver: Operation complete\n    Receiver->>Sender: Method response\n```\n\n## Implementation Notes\n\n* Senders and receivers **SHOULD** track active progress tokens\n* Both parties **SHOULD** implement rate limiting to prevent flooding\n* Progress notifications **MUST** stop after completion\n\n\n# Key Changes\nSource: https://modelcontextprotocol.io/specification/2025-06-18/changelog\n\n\n\n<div id=\"enable-section-numbers\" />\n\nThis document lists changes made to the Model Context Protocol (MCP) specification since\nthe previous revision, [2025-03-26](/specification/2025-03-26).\n\n## Major changes\n\n1. Remove support for JSON-RPC **[batching](https://www.jsonrpc.org/specification#batch)**\n   (PR [#416](https://github.com/modelcontextprotocol/specification/pull/416))\n2. Add support for [structured tool output](/specification/2025-06-18/server/tools#structured-content)\n   (PR [#371](https://github.com/modelcontextprotocol/modelcontextprotocol/pull/371))\n3. Classify MCP servers as [OAuth Resource Servers](/specification/2025-06-18/basic/authorization#authorization-server-discovery),\n   adding protected resource metadata to discover the corresponding Authorization server.\n   (PR [#338](https://github.com/modelcontextprotocol/modelcontextprotocol/pull/338))\n4. 
Require MCP clients to implement Resource Indicators as described in [RFC 8707](https://www.rfc-editor.org/rfc/rfc8707.html) to prevent\n   malicious servers from obtaining access tokens.\n   (PR [#734](https://github.com/modelcontextprotocol/modelcontextprotocol/pull/734))\n5. Clarify [security considerations](/specification/2025-06-18/basic/authorization#security-considerations) and best practices\n   in the authorization spec and in a new [security best practices page](/specification/2025-06-18/basic/security_best_practices).\n6. Add support for **[elicitation](/specification/2025-06-18/client/elicitation)**, enabling servers to request additional\n   information from users during interactions.\n   (PR [#382](https://github.com/modelcontextprotocol/modelcontextprotocol/pull/382))\n7. Add support for **[resource links](/specification/2025-06-18/server/tools#resource-links)** in\n   tool call results. (PR [#603](https://github.com/modelcontextprotocol/modelcontextprotocol/pull/603))\n8. Require [negotiated protocol version to be specified](/specification/2025-06-18/basic/transports#protocol-version-header)\n   via `MCP-Protocol-Version` header in subsequent requests when using HTTP (PR [#548](https://github.com/modelcontextprotocol/modelcontextprotocol/pull/548)).\n9. Change **SHOULD** to **MUST** in [Lifecycle Operation](/specification/2025-06-18/basic/lifecycle#operation)\n\n## Other schema changes\n\n1. Add `_meta` field to additional interface types (PR [#710](https://github.com/modelcontextprotocol/modelcontextprotocol/pull/710)),\n   and specify [proper usage](/specification/2025-06-18/basic#meta).\n2. Add `context` field to `CompletionRequest`, providing for completion requests to include\n   previously-resolved variables (PR [#598](https://github.com/modelcontextprotocol/modelcontextprotocol/pull/598)).\n3. 
Add `title` field for human-friendly display names, so that `name` can be used as a programmatic\n   identifier (PR [#663](https://github.com/modelcontextprotocol/modelcontextprotocol/pull/663))\n\n## Full changelog\n\nFor a complete list of all changes that have been made since the last protocol revision,\n[see GitHub](https://github.com/modelcontextprotocol/specification/compare/2025-03-26...2025-06-18).\n\n\n# Elicitation\nSource: https://modelcontextprotocol.io/specification/2025-06-18/client/elicitation\n\n\n\n<div id=\"enable-section-numbers\" />\n\n<Info>**Protocol Revision**: 2025-06-18</Info>\n\n<Note>\n  Elicitation is newly introduced in this version of the MCP specification and its design may evolve in future protocol versions.\n</Note>\n\nThe Model Context Protocol (MCP) provides a standardized way for servers to request additional\ninformation from users through the client during interactions. This flow allows clients to\nmaintain control over user interactions and data sharing while enabling servers to gather\nnecessary information dynamically.\nServers request structured data from users with JSON schemas to validate responses.\n\n## User Interaction Model\n\nElicitation in MCP allows servers to implement interactive workflows by enabling user input\nrequests to occur *nested* inside other MCP server features.\n\nImplementations are free to expose elicitation through any interface pattern that suits\ntheir needs—the protocol itself does not mandate any specific user interaction\nmodel.\n\n<Warning>\n  For trust & safety and security:\n\n  * Servers **MUST NOT** use elicitation to request sensitive information.\n\n  Applications **SHOULD**:\n\n  * Provide UI that makes it clear which server is requesting information\n  * Allow users to review and modify their responses before sending\n  * Respect user privacy and provide clear reject and cancel options\n</Warning>\n\n## Capabilities\n\nClients that support elicitation **MUST** declare the 
`elicitation` capability during\n[initialization](/specification/2025-06-18/basic/lifecycle#initialization):\n\n```json\n{\n  \"capabilities\": {\n    \"elicitation\": {}\n  }\n}\n```\n\n## Protocol Messages\n\n### Creating Elicitation Requests\n\nTo request information from a user, servers send an `elicitation/create` request:\n\n#### Simple Text Request\n\n**Request:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"method\": \"elicitation/create\",\n  \"params\": {\n    \"message\": \"Please provide your GitHub username\",\n    \"requestedSchema\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"name\": {\n          \"type\": \"string\"\n        }\n      },\n      \"required\": [\"name\"]\n    }\n  }\n}\n```\n\n**Response:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"result\": {\n    \"action\": \"accept\",\n    \"content\": {\n      \"name\": \"octocat\"\n    }\n  }\n}\n```\n\n#### Structured Data Request\n\n**Request:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 2,\n  \"method\": \"elicitation/create\",\n  \"params\": {\n    \"message\": \"Please provide your contact information\",\n    \"requestedSchema\": {\n      \"type\": \"object\",\n      \"properties\": {\n        \"name\": {\n          \"type\": \"string\",\n          \"description\": \"Your full name\"\n        },\n        \"email\": {\n          \"type\": \"string\",\n          \"format\": \"email\",\n          \"description\": \"Your email address\"\n        },\n        \"age\": {\n          \"type\": \"number\",\n          \"minimum\": 18,\n          \"description\": \"Your age\"\n        }\n      },\n      \"required\": [\"name\", \"email\"]\n    }\n  }\n}\n```\n\n**Response:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 2,\n  \"result\": {\n    \"action\": \"accept\",\n    \"content\": {\n      \"name\": \"Monalisa Octocat\",\n      \"email\": \"octocat@github.com\",\n      \"age\": 30\n    }\n  }\n}\n```\n\n**Reject Response 
Example:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 2,\n  \"result\": {\n    \"action\": \"reject\"\n  }\n}\n```\n\n**Cancel Response Example:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 2,\n  \"result\": {\n    \"action\": \"cancel\"\n  }\n}\n```\n\n## Message Flow\n\n```mermaid\nsequenceDiagram\n    participant User\n    participant Client\n    participant Server\n\n    Note over Server,Client: Server initiates elicitation\n    Server->>Client: elicitation/create\n\n    Note over Client,User: Human interaction\n    Client->>User: Present elicitation UI\n    User-->>Client: Provide requested information\n\n    Note over Server,Client: Complete request\n    Client-->>Server: Return user response\n\n    Note over Server: Continue processing with new information\n```\n\n## Request Schema\n\nThe `requestedSchema` field allows servers to define the structure of the expected response using a restricted subset of JSON Schema. To simplify implementation for clients, elicitation schemas are limited to flat objects with primitive properties only:\n\n```json\n\"requestedSchema\": {\n  \"type\": \"object\",\n  \"properties\": {\n    \"propertyName\": {\n      \"type\": \"string\",\n      \"title\": \"Display Name\",\n      \"description\": \"Description of the property\"\n    },\n    \"anotherProperty\": {\n      \"type\": \"number\",\n      \"minimum\": 0,\n      \"maximum\": 100\n    }\n  },\n  \"required\": [\"propertyName\"]\n}\n```\n\n### Supported Schema Types\n\nThe schema is restricted to these primitive types:\n\n1. **String Schema**\n\n   ```json\n   {\n     \"type\": \"string\",\n     \"title\": \"Display Name\",\n     \"description\": \"Description text\",\n     \"minLength\": 3,\n     \"maxLength\": 50,\n     \"format\": \"email\" // Supported: \"email\", \"uri\", \"date\", \"date-time\"\n   }\n   ```\n\n   Supported formats: `email`, `uri`, `date`, `date-time`\n\n2. 
**Number Schema**\n\n   ```json\n   {\n     \"type\": \"number\", // or \"integer\"\n     \"title\": \"Display Name\",\n     \"description\": \"Description text\",\n     \"minimum\": 0,\n     \"maximum\": 100\n   }\n   ```\n\n3. **Boolean Schema**\n\n   ```json\n   {\n     \"type\": \"boolean\",\n     \"title\": \"Display Name\",\n     \"description\": \"Description text\",\n     \"default\": false\n   }\n   ```\n\n4. **Enum Schema**\n   ```json\n   {\n     \"type\": \"string\",\n     \"title\": \"Display Name\",\n     \"description\": \"Description text\",\n     \"enum\": [\"option1\", \"option2\", \"option3\"],\n     \"enumNames\": [\"Option 1\", \"Option 2\", \"Option 3\"]\n   }\n   ```\n\nClients can use this schema to:\n\n1. Generate appropriate input forms\n2. Validate user input before sending\n3. Provide better guidance to users\n\nNote that complex nested structures, arrays of objects, and other advanced JSON Schema features are intentionally not supported to simplify client implementation.\n\n## Response Actions\n\nElicitation responses use a three-action model to clearly distinguish between different user actions:\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"result\": {\n    \"action\": \"accept\", // or \"reject\" or \"cancel\"\n    \"content\": {\n      \"propertyName\": \"value\",\n      \"anotherProperty\": 42\n    }\n  }\n}\n```\n\nThe three response actions are:\n\n1. **Accept** (`action: \"accept\"`): User explicitly approved and submitted with data\n\n   * The `content` field contains the submitted data matching the requested schema\n   * Example: User clicked \"Submit\", \"OK\", \"Confirm\", etc.\n\n2. **Reject** (`action: \"reject\"`): User explicitly rejected the request\n\n   * The `content` field is typically omitted\n   * Example: User clicked \"Reject\", \"Decline\", \"No\", etc.\n\n3. 
**Cancel** (`action: \"cancel\"`): User dismissed without making an explicit choice\n   * The `content` field is typically omitted\n   * Example: User closed the dialog, clicked outside, pressed Escape, etc.\n\nServers should handle each state appropriately:\n\n* **Accept**: Process the submitted data\n* **Reject**: Handle explicit rejection (e.g., offer alternatives)\n* **Cancel**: Handle dismissal (e.g., prompt again later)\n\n## Security Considerations\n\n1. Servers **MUST NOT** request sensitive information through elicitation\n2. Clients **SHOULD** implement user approval controls\n3. Both parties **SHOULD** validate elicitation content against the provided schema\n4. Clients **SHOULD** provide clear indication of which server is requesting information\n5. Clients **SHOULD** allow users to reject elicitation requests at any time\n6. Clients **SHOULD** implement rate limiting\n7. Clients **SHOULD** present elicitation requests in a way that makes it clear what information is being requested and why\n\n\n# Roots\nSource: https://modelcontextprotocol.io/specification/2025-06-18/client/roots\n\n\n\n<div id=\"enable-section-numbers\" />\n\n<Info>**Protocol Revision**: 2025-06-18</Info>\n\nThe Model Context Protocol (MCP) provides a standardized way for clients to expose\nfilesystem \"roots\" to servers. Roots define the boundaries of where servers can operate\nwithin the filesystem, allowing them to understand which directories and files they have\naccess to. Servers can request the list of roots from supporting clients and receive\nnotifications when that list changes.\n\n## User Interaction Model\n\nRoots in MCP are typically exposed through workspace or project configuration interfaces.\n\nFor example, implementations could offer a workspace/project picker that allows users to\nselect directories and files the server should have access to. 
This can be combined with\nautomatic workspace detection from version control systems or project files.\n\nHowever, implementations are free to expose roots through any interface pattern that\nsuits their needs—the protocol itself does not mandate any specific user\ninteraction model.\n\n## Capabilities\n\nClients that support roots **MUST** declare the `roots` capability during\n[initialization](/specification/2025-06-18/basic/lifecycle#initialization):\n\n```json\n{\n  \"capabilities\": {\n    \"roots\": {\n      \"listChanged\": true\n    }\n  }\n}\n```\n\n`listChanged` indicates whether the client will emit notifications when the list of roots\nchanges.\n\n## Protocol Messages\n\n### Listing Roots\n\nTo retrieve roots, servers send a `roots/list` request:\n\n**Request:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"method\": \"roots/list\"\n}\n```\n\n**Response:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"result\": {\n    \"roots\": [\n      {\n        \"uri\": \"file:///home/user/projects/myproject\",\n        \"name\": \"My Project\"\n      }\n    ]\n  }\n}\n```\n\n### Root List Changes\n\nWhen roots change, clients that support `listChanged` **MUST** send a notification:\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"method\": \"notifications/roots/list_changed\"\n}\n```\n\n## Message Flow\n\n```mermaid\nsequenceDiagram\n    participant Server\n    participant Client\n\n    Note over Server,Client: Discovery\n    Server->>Client: roots/list\n    Client-->>Server: Available roots\n\n    Note over Server,Client: Changes\n    Client--)Server: notifications/roots/list_changed\n    Server->>Client: roots/list\n    Client-->>Server: Updated roots\n```\n\n## Data Types\n\n### Root\n\nA root definition includes:\n\n* `uri`: Unique identifier for the root. 
This **MUST** be a `file://` URI in the current\n  specification.\n* `name`: Optional human-readable name for display purposes.\n\nExample roots for different use cases:\n\n#### Project Directory\n\n```json\n{\n  \"uri\": \"file:///home/user/projects/myproject\",\n  \"name\": \"My Project\"\n}\n```\n\n#### Multiple Repositories\n\n```json\n[\n  {\n    \"uri\": \"file:///home/user/repos/frontend\",\n    \"name\": \"Frontend Repository\"\n  },\n  {\n    \"uri\": \"file:///home/user/repos/backend\",\n    \"name\": \"Backend Repository\"\n  }\n]\n```\n\n## Error Handling\n\nClients **SHOULD** return standard JSON-RPC errors for common failure cases:\n\n* Client does not support roots: `-32601` (Method not found)\n* Internal errors: `-32603`\n\nExample error:\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"error\": {\n    \"code\": -32601,\n    \"message\": \"Roots not supported\",\n    \"data\": {\n      \"reason\": \"Client does not have roots capability\"\n    }\n  }\n}\n```\n\n## Security Considerations\n\n1. Clients **MUST**:\n\n   * Only expose roots with appropriate permissions\n   * Validate all root URIs to prevent path traversal\n   * Implement proper access controls\n   * Monitor root accessibility\n\n2. Servers **SHOULD**:\n   * Handle cases where roots become unavailable\n   * Respect root boundaries during operations\n   * Validate all paths against provided roots\n\n## Implementation Guidelines\n\n1. Clients **SHOULD**:\n\n   * Prompt users for consent before exposing roots to servers\n   * Provide clear user interfaces for root management\n   * Validate root accessibility before exposing\n   * Monitor for root changes\n\n2. 
Servers **SHOULD**:\n   * Check for roots capability before usage\n   * Handle root list changes gracefully\n   * Respect root boundaries in operations\n   * Cache root information appropriately\n\n\n# Sampling\nSource: https://modelcontextprotocol.io/specification/2025-06-18/client/sampling\n\n\n\n<div id=\"enable-section-numbers\" />\n\n<Info>**Protocol Revision**: 2025-06-18</Info>\n\nThe Model Context Protocol (MCP) provides a standardized way for servers to request LLM\nsampling (\"completions\" or \"generations\") from language models via clients. This flow\nallows clients to maintain control over model access, selection, and permissions while\nenabling servers to leverage AI capabilities—with no server API keys necessary.\nServers can request text, audio, or image-based interactions and optionally include\ncontext from MCP servers in their prompts.\n\n## User Interaction Model\n\nSampling in MCP allows servers to implement agentic behaviors, by enabling LLM calls to\noccur *nested* inside other MCP server features.\n\nImplementations are free to expose sampling through any interface pattern that suits\ntheir needs—the protocol itself does not mandate any specific user interaction\nmodel.\n\n<Warning>\n  For trust & safety and security, there **SHOULD** always\n  be a human in the loop with the ability to deny sampling requests.\n\n  Applications **SHOULD**:\n\n  * Provide UI that makes it easy and intuitive to review sampling requests\n  * Allow users to view and edit prompts before sending\n  * Present generated responses for review before delivery\n</Warning>\n\n## Capabilities\n\nClients that support sampling **MUST** declare the `sampling` capability during\n[initialization](/specification/2025-06-18/basic/lifecycle#initialization):\n\n```json\n{\n  \"capabilities\": {\n    \"sampling\": {}\n  }\n}\n```\n\n## Protocol Messages\n\n### Creating Messages\n\nTo request a language model generation, servers send a `sampling/createMessage` 
request:\n\n**Request:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"method\": \"sampling/createMessage\",\n  \"params\": {\n    \"messages\": [\n      {\n        \"role\": \"user\",\n        \"content\": {\n          \"type\": \"text\",\n          \"text\": \"What is the capital of France?\"\n        }\n      }\n    ],\n    \"modelPreferences\": {\n      \"hints\": [\n        {\n          \"name\": \"claude-3-sonnet\"\n        }\n      ],\n      \"intelligencePriority\": 0.8,\n      \"speedPriority\": 0.5\n    },\n    \"systemPrompt\": \"You are a helpful assistant.\",\n    \"maxTokens\": 100\n  }\n}\n```\n\n**Response:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"result\": {\n    \"role\": \"assistant\",\n    \"content\": {\n      \"type\": \"text\",\n      \"text\": \"The capital of France is Paris.\"\n    },\n    \"model\": \"claude-3-sonnet-20240307\",\n    \"stopReason\": \"endTurn\"\n  }\n}\n```\n\n## Message Flow\n\n```mermaid\nsequenceDiagram\n    participant Server\n    participant Client\n    participant User\n    participant LLM\n\n    Note over Server,Client: Server initiates sampling\n    Server->>Client: sampling/createMessage\n\n    Note over Client,User: Human-in-the-loop review\n    Client->>User: Present request for approval\n    User-->>Client: Review and approve/modify\n\n    Note over Client,LLM: Model interaction\n    Client->>LLM: Forward approved request\n    LLM-->>Client: Return generation\n\n    Note over Client,User: Response review\n    Client->>User: Present response for approval\n    User-->>Client: Review and approve/modify\n\n    Note over Server,Client: Complete request\n    Client-->>Server: Return approved response\n```\n\n## Data Types\n\n### Messages\n\nSampling messages can contain:\n\n#### Text Content\n\n```json\n{\n  \"type\": \"text\",\n  \"text\": \"The message content\"\n}\n```\n\n#### Image Content\n\n```json\n{\n  \"type\": \"image\",\n  \"data\": \"base64-encoded-image-data\",\n  
\"mimeType\": \"image/jpeg\"\n}\n```\n\n#### Audio Content\n\n```json\n{\n  \"type\": \"audio\",\n  \"data\": \"base64-encoded-audio-data\",\n  \"mimeType\": \"audio/wav\"\n}\n```\n\n### Model Preferences\n\nModel selection in MCP requires careful abstraction since servers and clients may use\ndifferent AI providers with distinct model offerings. A server cannot simply request a\nspecific model by name since the client may not have access to that exact model or may\nprefer to use a different provider's equivalent model.\n\nTo solve this, MCP implements a preference system that combines abstract capability\npriorities with optional model hints:\n\n#### Capability Priorities\n\nServers express their needs through three normalized priority values (0-1):\n\n* `costPriority`: How important is minimizing costs? Higher values prefer cheaper models.\n* `speedPriority`: How important is low latency? Higher values prefer faster models.\n* `intelligencePriority`: How important are advanced capabilities? Higher values prefer\n  more capable models.\n\n#### Model Hints\n\nWhile priorities help select models based on characteristics, `hints` allow servers to\nsuggest specific models or model families:\n\n* Hints are treated as substrings that can match model names flexibly\n* Multiple hints are evaluated in order of preference\n* Clients **MAY** map hints to equivalent models from different providers\n* Hints are advisory—clients make final model selection\n\nFor example:\n\n```json\n{\n  \"hints\": [\n    { \"name\": \"claude-3-sonnet\" }, // Prefer Sonnet-class models\n    { \"name\": \"claude\" } // Fall back to any Claude model\n  ],\n  \"costPriority\": 0.3, // Cost is less important\n  \"speedPriority\": 0.8, // Speed is very important\n  \"intelligencePriority\": 0.5 // Moderate capability needs\n}\n```\n\nThe client processes these preferences to select an appropriate model from its available\noptions. 
For instance, if the client doesn't have access to Claude models but has Gemini,\nit might map the sonnet hint to `gemini-1.5-pro` based on similar capabilities.\n\n## Error Handling\n\nClients **SHOULD** return errors for common failure cases:\n\nExample error:\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"error\": {\n    \"code\": -1,\n    \"message\": \"User rejected sampling request\"\n  }\n}\n```\n\n## Security Considerations\n\n1. Clients **SHOULD** implement user approval controls\n2. Both parties **SHOULD** validate message content\n3. Clients **SHOULD** respect model preference hints\n4. Clients **SHOULD** implement rate limiting\n5. Both parties **MUST** handle sensitive data appropriately\n\n\n# Specification\nSource: https://modelcontextprotocol.io/specification/2025-06-18/index\n\n\n\n<div id=\"enable-section-numbers\" />\n\n[Model Context Protocol](https://modelcontextprotocol.io) (MCP) is an open protocol that\nenables seamless integration between LLM applications and external data sources and\ntools. 
Whether you're building an AI-powered IDE, enhancing a chat interface, or creating\ncustom AI workflows, MCP provides a standardized way to connect LLMs with the context\nthey need.\n\nThis specification defines the authoritative protocol requirements, based on the\nTypeScript schema in\n[schema.ts](https://github.com/modelcontextprotocol/specification/blob/main/schema/2025-06-18/schema.ts).\n\nFor implementation guides and examples, visit\n[modelcontextprotocol.io](https://modelcontextprotocol.io).\n\nThe key words \"MUST\", \"MUST NOT\", \"REQUIRED\", \"SHALL\", \"SHALL NOT\", \"SHOULD\", \"SHOULD\nNOT\", \"RECOMMENDED\", \"NOT RECOMMENDED\", \"MAY\", and \"OPTIONAL\" in this document are to be\ninterpreted as described in [BCP 14](https://datatracker.ietf.org/doc/html/bcp14)\n\\[[RFC2119](https://datatracker.ietf.org/doc/html/rfc2119)]\n\\[[RFC8174](https://datatracker.ietf.org/doc/html/rfc8174)] when, and only when, they\nappear in all capitals, as shown here.\n\n## Overview\n\nMCP provides a standardized way for applications to:\n\n* Share contextual information with language models\n* Expose tools and capabilities to AI systems\n* Build composable integrations and workflows\n\nThe protocol uses [JSON-RPC](https://www.jsonrpc.org/) 2.0 messages to establish\ncommunication between:\n\n* **Hosts**: LLM applications that initiate connections\n* **Clients**: Connectors within the host application\n* **Servers**: Services that provide context and capabilities\n\nMCP takes some inspiration from the\n[Language Server Protocol](https://microsoft.github.io/language-server-protocol/), which\nstandardizes how to add support for programming languages across a whole ecosystem of\ndevelopment tools. 
In a similar way, MCP standardizes how to integrate additional context\nand tools into the ecosystem of AI applications.\n\n## Key Details\n\n### Base Protocol\n\n* [JSON-RPC](https://www.jsonrpc.org/) message format\n* Stateful connections\n* Server and client capability negotiation\n\n### Features\n\nServers offer any of the following features to clients:\n\n* **Resources**: Context and data, for the user or the AI model to use\n* **Prompts**: Templated messages and workflows for users\n* **Tools**: Functions for the AI model to execute\n\nClients may offer the following features to servers:\n\n* **Sampling**: Server-initiated agentic behaviors and recursive LLM interactions\n* **Roots**: Server-initiated inquiries into uri or filesystem boundaries to operate in\n* **Elicitation**: Server-initiated requests for additional information from users\n\n### Additional Utilities\n\n* Configuration\n* Progress tracking\n* Cancellation\n* Error reporting\n* Logging\n\n## Security and Trust & Safety\n\nThe Model Context Protocol enables powerful capabilities through arbitrary data access\nand code execution paths. With this power comes important security and trust\nconsiderations that all implementors must carefully address.\n\n### Key Principles\n\n1. **User Consent and Control**\n\n   * Users must explicitly consent to and understand all data access and operations\n   * Users must retain control over what data is shared and what actions are taken\n   * Implementors should provide clear UIs for reviewing and authorizing activities\n\n2. **Data Privacy**\n\n   * Hosts must obtain explicit user consent before exposing user data to servers\n   * Hosts must not transmit resource data elsewhere without user consent\n   * User data should be protected with appropriate access controls\n\n3. 
**Tool Safety**\n\n   * Tools represent arbitrary code execution and must be treated with appropriate\n     caution.\n     * In particular, descriptions of tool behavior such as annotations should be\n       considered untrusted, unless obtained from a trusted server.\n   * Hosts must obtain explicit user consent before invoking any tool\n   * Users should understand what each tool does before authorizing its use\n\n4. **LLM Sampling Controls**\n   * Users must explicitly approve any LLM sampling requests\n   * Users should control:\n     * Whether sampling occurs at all\n     * The actual prompt that will be sent\n     * What results the server can see\n   * The protocol intentionally limits server visibility into prompts\n\n### Implementation Guidelines\n\nWhile MCP itself cannot enforce these security principles at the protocol level,\nimplementors **SHOULD**:\n\n1. Build robust consent and authorization flows into their applications\n2. Provide clear documentation of security implications\n3. Implement appropriate access controls and data protections\n4. Follow security best practices in their integrations\n5. 
Consider privacy implications in their feature designs\n\n## Learn More\n\nExplore the detailed specification for each protocol component:\n\n<CardGroup cols={5}>\n  <Card title=\"Architecture\" icon=\"sitemap\" href=\"/specification/2025-06-18/architecture\" />\n\n  <Card title=\"Base Protocol\" icon=\"code\" href=\"/specification/2025-06-18/basic\" />\n\n  <Card title=\"Server Features\" icon=\"server\" href=\"/specification/2025-06-18/server\" />\n\n  <Card title=\"Client Features\" icon=\"user\" href=\"/specification/2025-06-18/client\" />\n\n  <Card title=\"Contributing\" icon=\"pencil\" href=\"/development/contributing\" />\n</CardGroup>\n\n\n# Overview\nSource: https://modelcontextprotocol.io/specification/2025-06-18/server/index\n\n\n\n<Info>**Protocol Revision**: 2025-06-18</Info>\n\nServers provide the fundamental building blocks for adding context to language models via\nMCP. These primitives enable rich interactions between clients, servers, and language\nmodels:\n\n* **Prompts**: Pre-defined templates or instructions that guide language model\n  interactions\n* **Resources**: Structured data or content that provides additional context to the model\n* **Tools**: Executable functions that allow models to perform actions or retrieve\n  information\n\nEach primitive can be summarized in the following control hierarchy:\n\n| Primitive | Control                | Description                                        | Example                         |\n| --------- | ---------------------- | -------------------------------------------------- | ------------------------------- |\n| Prompts   | User-controlled        | Interactive templates invoked by user choice       | Slash commands, menu options    |\n| Resources | Application-controlled | Contextual data attached and managed by the client | File contents, git history      |\n| Tools     | Model-controlled       | Functions exposed to the LLM to take actions       | API POST requests, file writing |\n\nExplore 
these key primitives in more detail below:\n\n<CardGroup cols={3}>\n  <Card title=\"Prompts\" icon=\"message\" href=\"/specification/2025-06-18/server/prompts\" />\n\n  <Card title=\"Resources\" icon=\"file-lines\" href=\"/specification/2025-06-18/server/resources\" />\n\n  <Card title=\"Tools\" icon=\"wrench\" href=\"/specification/2025-06-18/server/tools\" />\n</CardGroup>\n\n\n# Prompts\nSource: https://modelcontextprotocol.io/specification/2025-06-18/server/prompts\n\n\n\n<div id=\"enable-section-numbers\" />\n\n<Info>**Protocol Revision**: 2025-06-18</Info>\n\nThe Model Context Protocol (MCP) provides a standardized way for servers to expose prompt\ntemplates to clients. Prompts allow servers to provide structured messages and\ninstructions for interacting with language models. Clients can discover available\nprompts, retrieve their contents, and provide arguments to customize them.\n\n## User Interaction Model\n\nPrompts are designed to be **user-controlled**, meaning they are exposed from servers to\nclients with the intention of the user being able to explicitly select them for use.\n\nTypically, prompts would be triggered through user-initiated commands in the user\ninterface, which allows users to naturally discover and invoke available prompts.\n\nFor example, as slash commands:\n\n![Example of prompt exposed as slash command](https://mintlify.s3.us-west-1.amazonaws.com/mcp/specification/2025-06-18/server/slash-command.png)\n\nHowever, implementors are free to expose prompts through any interface pattern that suits\ntheir needs—the protocol itself does not mandate any specific user interaction\nmodel.\n\n## Capabilities\n\nServers that support prompts **MUST** declare the `prompts` capability during\n[initialization](/specification/2025-06-18/basic/lifecycle#initialization):\n\n```json\n{\n  \"capabilities\": {\n    \"prompts\": {\n      \"listChanged\": true\n    }\n  }\n}\n```\n\n`listChanged` indicates whether the server will emit notifications when 
the list of\navailable prompts changes.\n\n## Protocol Messages\n\n### Listing Prompts\n\nTo retrieve available prompts, clients send a `prompts/list` request. This operation\nsupports [pagination](/specification/2025-06-18/server/utilities/pagination).\n\n**Request:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"method\": \"prompts/list\",\n  \"params\": {\n    \"cursor\": \"optional-cursor-value\"\n  }\n}\n```\n\n**Response:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"result\": {\n    \"prompts\": [\n      {\n        \"name\": \"code_review\",\n        \"title\": \"Request Code Review\",\n        \"description\": \"Asks the LLM to analyze code quality and suggest improvements\",\n        \"arguments\": [\n          {\n            \"name\": \"code\",\n            \"description\": \"The code to review\",\n            \"required\": true\n          }\n        ]\n      }\n    ],\n    \"nextCursor\": \"next-page-cursor\"\n  }\n}\n```\n\n### Getting a Prompt\n\nTo retrieve a specific prompt, clients send a `prompts/get` request. 
Arguments may be\nauto-completed through [the completion API](/specification/2025-06-18/server/utilities/completion).\n\n**Request:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 2,\n  \"method\": \"prompts/get\",\n  \"params\": {\n    \"name\": \"code_review\",\n    \"arguments\": {\n      \"code\": \"def hello():\\n    print('world')\"\n    }\n  }\n}\n```\n\n**Response:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 2,\n  \"result\": {\n    \"description\": \"Code review prompt\",\n    \"messages\": [\n      {\n        \"role\": \"user\",\n        \"content\": {\n          \"type\": \"text\",\n          \"text\": \"Please review this Python code:\\ndef hello():\\n    print('world')\"\n        }\n      }\n    ]\n  }\n}\n```\n\n### List Changed Notification\n\nWhen the list of available prompts changes, servers that declared the `listChanged`\ncapability **SHOULD** send a notification:\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"method\": \"notifications/prompts/list_changed\"\n}\n```\n\n## Message Flow\n\n```mermaid\nsequenceDiagram\n    participant Client\n    participant Server\n\n    Note over Client,Server: Discovery\n    Client->>Server: prompts/list\n    Server-->>Client: List of prompts\n\n    Note over Client,Server: Usage\n    Client->>Server: prompts/get\n    Server-->>Client: Prompt content\n\n    opt listChanged\n      Note over Client,Server: Changes\n      Server--)Client: prompts/list_changed\n      Client->>Server: prompts/list\n      Server-->>Client: Updated prompts\n    end\n```\n\n## Data Types\n\n### Prompt\n\nA prompt definition includes:\n\n* `name`: Unique identifier for the prompt\n* `title`: Optional human-readable name of the prompt for display purposes.\n* `description`: Optional human-readable description\n* `arguments`: Optional list of arguments for customization\n\n### PromptMessage\n\nMessages in a prompt can contain:\n\n* `role`: Either \"user\" or \"assistant\" to indicate the speaker\n* `content`: One of the following 
content types:\n\n#### Text Content\n\nText content represents plain text messages:\n\n```json\n{\n  \"type\": \"text\",\n  \"text\": \"The text content of the message\"\n}\n```\n\nThis is the most common content type used for natural language interactions.\n\n#### Image Content\n\nImage content allows including visual information in messages:\n\n```json\n{\n  \"type\": \"image\",\n  \"data\": \"base64-encoded-image-data\",\n  \"mimeType\": \"image/png\"\n}\n```\n\nThe image data **MUST** be base64-encoded and include a valid MIME type. This enables\nmulti-modal interactions where visual context is important.\n\n#### Audio Content\n\nAudio content allows including audio information in messages:\n\n```json\n{\n  \"type\": \"audio\",\n  \"data\": \"base64-encoded-audio-data\",\n  \"mimeType\": \"audio/wav\"\n}\n```\n\nThe audio data MUST be base64-encoded and include a valid MIME type. This enables\nmulti-modal interactions where audio context is important.\n\n#### Embedded Resources\n\nEmbedded resources allow referencing server-side resources directly in messages:\n\n```json\n{\n  \"type\": \"resource\",\n  \"resource\": {\n    \"uri\": \"resource://example\",\n    \"name\": \"example\",\n    \"title\": \"My Example Resource\",\n    \"mimeType\": \"text/plain\",\n    \"text\": \"Resource content\"\n  }\n}\n```\n\nResources can contain either text or binary (blob) data and **MUST** include:\n\n* A valid resource URI\n* The appropriate MIME type\n* Either text content or base64-encoded blob data\n\nEmbedded resources enable prompts to seamlessly incorporate server-managed content like\ndocumentation, code samples, or other reference materials directly into the conversation\nflow.\n\n## Error Handling\n\nServers **SHOULD** return standard JSON-RPC errors for common failure cases:\n\n* Invalid prompt name: `-32602` (Invalid params)\n* Missing required arguments: `-32602` (Invalid params)\n* Internal errors: `-32603` (Internal error)\n\n## Implementation 
Considerations\n\n1. Servers **SHOULD** validate prompt arguments before processing\n2. Clients **SHOULD** handle pagination for large prompt lists\n3. Both parties **SHOULD** respect capability negotiation\n\n## Security\n\nImplementations **MUST** carefully validate all prompt inputs and outputs to prevent\ninjection attacks or unauthorized access to resources.\n\n\n# Resources\nSource: https://modelcontextprotocol.io/specification/2025-06-18/server/resources\n\n\n\n<div id=\"enable-section-numbers\" />\n\n<Info>**Protocol Revision**: 2025-06-18</Info>\n\nThe Model Context Protocol (MCP) provides a standardized way for servers to expose\nresources to clients. Resources allow servers to share data that provides context to\nlanguage models, such as files, database schemas, or application-specific information.\nEach resource is uniquely identified by a\n[URI](https://datatracker.ietf.org/doc/html/rfc3986).\n\n## User Interaction Model\n\nResources in MCP are designed to be **application-driven**, with host applications\ndetermining how to incorporate context based on their needs.\n\nFor example, applications could:\n\n* Expose resources through UI elements for explicit selection, in a tree or list view\n* Allow the user to search through and filter available resources\n* Implement automatic context inclusion, based on heuristics or the AI model's selection\n\n![Example of resource context picker](https://mintlify.s3.us-west-1.amazonaws.com/mcp/specification/2025-06-18/server/resource-picker.png)\n\nHowever, implementations are free to expose resources through any interface pattern that\nsuits their needs—the protocol itself does not mandate any specific user\ninteraction model.\n\n## Capabilities\n\nServers that support resources **MUST** declare the `resources` capability:\n\n```json\n{\n  \"capabilities\": {\n    \"resources\": {\n      \"subscribe\": true,\n      \"listChanged\": true\n    }\n  }\n}\n```\n\nThe capability supports two optional features:\n\n* 
`subscribe`: whether the client can subscribe to be notified of changes to individual\n  resources.\n* `listChanged`: whether the server will emit notifications when the list of available\n  resources changes.\n\nBoth `subscribe` and `listChanged` are optional—servers can support neither,\neither, or both:\n\n```json\n{\n  \"capabilities\": {\n    \"resources\": {} // Neither feature supported\n  }\n}\n```\n\n```json\n{\n  \"capabilities\": {\n    \"resources\": {\n      \"subscribe\": true // Only subscriptions supported\n    }\n  }\n}\n```\n\n```json\n{\n  \"capabilities\": {\n    \"resources\": {\n      \"listChanged\": true // Only list change notifications supported\n    }\n  }\n}\n```\n\n## Protocol Messages\n\n### Listing Resources\n\nTo discover available resources, clients send a `resources/list` request. This operation\nsupports [pagination](/specification/2025-06-18/server/utilities/pagination).\n\n**Request:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"method\": \"resources/list\",\n  \"params\": {\n    \"cursor\": \"optional-cursor-value\"\n  }\n}\n```\n\n**Response:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"result\": {\n    \"resources\": [\n      {\n        \"uri\": \"file:///project/src/main.rs\",\n        \"name\": \"main.rs\",\n        \"title\": \"Rust Software Application Main File\",\n        \"description\": \"Primary application entry point\",\n        \"mimeType\": \"text/x-rust\"\n      }\n    ],\n    \"nextCursor\": \"next-page-cursor\"\n  }\n}\n```\n\n### Reading Resources\n\nTo retrieve resource contents, clients send a `resources/read` request:\n\n**Request:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 2,\n  \"method\": \"resources/read\",\n  \"params\": {\n    \"uri\": \"file:///project/src/main.rs\"\n  }\n}\n```\n\n**Response:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 2,\n  \"result\": {\n    \"contents\": [\n      {\n        \"uri\": \"file:///project/src/main.rs\",\n        \"name\": 
\"main.rs\",\n        \"title\": \"Rust Software Application Main File\",\n        \"mimeType\": \"text/x-rust\",\n        \"text\": \"fn main() {\\n    println!(\\\"Hello world!\\\");\\n}\"\n      }\n    ]\n  }\n}\n```\n\n### Resource Templates\n\nResource templates allow servers to expose parameterized resources using\n[URI templates](https://datatracker.ietf.org/doc/html/rfc6570). Arguments may be\nauto-completed through [the completion API](/specification/2025-06-18/server/utilities/completion).\n\n**Request:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 3,\n  \"method\": \"resources/templates/list\"\n}\n```\n\n**Response:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 3,\n  \"result\": {\n    \"resourceTemplates\": [\n      {\n        \"uriTemplate\": \"file:///{path}\",\n        \"name\": \"Project Files\",\n        \"title\": \"📁 Project Files\",\n        \"description\": \"Access files in the project directory\",\n        \"mimeType\": \"application/octet-stream\"\n      }\n    ]\n  }\n}\n```\n\n### List Changed Notification\n\nWhen the list of available resources changes, servers that declared the `listChanged`\ncapability **SHOULD** send a notification:\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"method\": \"notifications/resources/list_changed\"\n}\n```\n\n### Subscriptions\n\nThe protocol supports optional subscriptions to resource changes. 
Clients can subscribe\nto specific resources and receive notifications when they change:\n\n**Subscribe Request:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 4,\n  \"method\": \"resources/subscribe\",\n  \"params\": {\n    \"uri\": \"file:///project/src/main.rs\"\n  }\n}\n```\n\n**Update Notification:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"method\": \"notifications/resources/updated\",\n  \"params\": {\n    \"uri\": \"file:///project/src/main.rs\",\n    \"title\": \"Rust Software Application Main File\"\n  }\n}\n```\n\n## Message Flow\n\n```mermaid\nsequenceDiagram\n    participant Client\n    participant Server\n\n    Note over Client,Server: Resource Discovery\n    Client->>Server: resources/list\n    Server-->>Client: List of resources\n\n    Note over Client,Server: Resource Access\n    Client->>Server: resources/read\n    Server-->>Client: Resource contents\n\n    Note over Client,Server: Subscriptions\n    Client->>Server: resources/subscribe\n    Server-->>Client: Subscription confirmed\n\n    Note over Client,Server: Updates\n    Server--)Client: notifications/resources/updated\n    Client->>Server: resources/read\n    Server-->>Client: Updated contents\n```\n\n## Data Types\n\n### Resource\n\nA resource definition includes:\n\n* `uri`: Unique identifier for the resource\n* `name`: The name of the resource.\n* `title`: Optional human-readable name of the resource for display purposes.\n* `description`: Optional description\n* `mimeType`: Optional MIME type\n* `size`: Optional size in bytes\n\n### Resource Contents\n\nResources can contain either text or binary data:\n\n#### Text Content\n\n```json\n{\n  \"uri\": \"file:///example.txt\",\n  \"name\": \"example.txt\",\n  \"title\": \"Example Text File\",\n  \"mimeType\": \"text/plain\",\n  \"text\": \"Resource content\"\n}\n```\n\n#### Binary Content\n\n```json\n{\n  \"uri\": \"file:///example.png\",\n  \"name\": \"example.png\",\n  \"title\": \"Example Image\",\n  \"mimeType\": \"image/png\",\n 
 \"blob\": \"base64-encoded-data\"\n}\n```\n\n## Common URI Schemes\n\nThe protocol defines several standard URI schemes. This list is not\nexhaustive—implementations are always free to use additional, custom URI schemes.\n\n### https\\://\n\nUsed to represent a resource available on the web.\n\nServers **SHOULD** use this scheme only when the client is able to fetch and load the\nresource directly from the web on its own—that is, it doesn’t need to read the resource\nvia the MCP server.\n\nFor other use cases, servers **SHOULD** prefer to use another URI scheme, or define a\ncustom one, even if the server will itself be downloading resource contents over the\ninternet.\n\n### file://\n\nUsed to identify resources that behave like a filesystem. However, the resources do not\nneed to map to an actual physical filesystem.\n\nMCP servers **MAY** identify file:// resources with an\n[XDG MIME type](https://specifications.freedesktop.org/shared-mime-info-spec/0.14/ar01s02.html#id-1.3.14),\nlike `inode/directory`, to represent non-regular files (such as directories) that don’t\notherwise have a standard MIME type.\n\n### git://\n\nGit version control integration.\n\n### Custom URI Schemes\n\nCustom URI schemes **MUST** be in accordance with [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986),\ntaking the above guidance into account.\n\n## Error Handling\n\nServers **SHOULD** return standard JSON-RPC errors for common failure cases:\n\n* Resource not found: `-32002`\n* Internal errors: `-32603`\n\nExample error:\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 5,\n  \"error\": {\n    \"code\": -32002,\n    \"message\": \"Resource not found\",\n    \"data\": {\n      \"uri\": \"file:///nonexistent.txt\"\n    }\n  }\n}\n```\n\n## Security Considerations\n\n1. Servers **MUST** validate all resource URIs\n2. Access controls **SHOULD** be implemented for sensitive resources\n3. Binary data **MUST** be properly encoded\n4. 
Resource permissions **SHOULD** be checked before operations\n\n\n# Tools\nSource: https://modelcontextprotocol.io/specification/2025-06-18/server/tools\n\n\n\n<div id=\"enable-section-numbers\" />\n\n<Info>**Protocol Revision**: 2025-06-18</Info>\n\nThe Model Context Protocol (MCP) allows servers to expose tools that can be invoked by\nlanguage models. Tools enable models to interact with external systems, such as querying\ndatabases, calling APIs, or performing computations. Each tool is uniquely identified by\na name and includes metadata describing its schema.\n\n## User Interaction Model\n\nTools in MCP are designed to be **model-controlled**, meaning that the language model can\ndiscover and invoke tools automatically based on its contextual understanding and the\nuser's prompts.\n\nHowever, implementations are free to expose tools through any interface pattern that\nsuits their needs—the protocol itself does not mandate any specific user\ninteraction model.\n\n<Warning>\n  For trust & safety and security, there **SHOULD** always\n  be a human in the loop with the ability to deny tool invocations.\n\n  Applications **SHOULD**:\n\n  * Provide UI that makes clear which tools are being exposed to the AI model\n  * Insert clear visual indicators when tools are invoked\n  * Present confirmation prompts to the user for operations, to ensure a human is in the\n    loop\n</Warning>\n\n## Capabilities\n\nServers that support tools **MUST** declare the `tools` capability:\n\n```json\n{\n  \"capabilities\": {\n    \"tools\": {\n      \"listChanged\": true\n    }\n  }\n}\n```\n\n`listChanged` indicates whether the server will emit notifications when the list of\navailable tools changes.\n\n## Protocol Messages\n\n### Listing Tools\n\nTo discover available tools, clients send a `tools/list` request. 
This operation supports\n[pagination](/specification/2025-06-18/server/utilities/pagination).\n\n**Request:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"method\": \"tools/list\",\n  \"params\": {\n    \"cursor\": \"optional-cursor-value\"\n  }\n}\n```\n\n**Response:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"result\": {\n    \"tools\": [\n      {\n        \"name\": \"get_weather\",\n        \"title\": \"Weather Information Provider\",\n        \"description\": \"Get current weather information for a location\",\n        \"inputSchema\": {\n          \"type\": \"object\",\n          \"properties\": {\n            \"location\": {\n              \"type\": \"string\",\n              \"description\": \"City name or zip code\"\n            }\n          },\n          \"required\": [\"location\"]\n        }\n      }\n    ],\n    \"nextCursor\": \"next-page-cursor\"\n  }\n}\n```\n\n### Calling Tools\n\nTo invoke a tool, clients send a `tools/call` request:\n\n**Request:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 2,\n  \"method\": \"tools/call\",\n  \"params\": {\n    \"name\": \"get_weather\",\n    \"arguments\": {\n      \"location\": \"New York\"\n    }\n  }\n}\n```\n\n**Response:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 2,\n  \"result\": {\n    \"content\": [\n      {\n        \"type\": \"text\",\n        \"text\": \"Current weather in New York:\\nTemperature: 72°F\\nConditions: Partly cloudy\"\n      }\n    ],\n    \"isError\": false\n  }\n}\n```\n\n### List Changed Notification\n\nWhen the list of available tools changes, servers that declared the `listChanged`\ncapability **SHOULD** send a notification:\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"method\": \"notifications/tools/list_changed\"\n}\n```\n\n## Message Flow\n\n```mermaid\nsequenceDiagram\n    participant LLM\n    participant Client\n    participant Server\n\n    Note over Client,Server: Discovery\n    Client->>Server: tools/list\n    Server-->>Client: 
List of tools\n\n    Note over Client,LLM: Tool Selection\n    LLM->>Client: Select tool to use\n\n    Note over Client,Server: Invocation\n    Client->>Server: tools/call\n    Server-->>Client: Tool result\n    Client->>LLM: Process result\n\n    Note over Client,Server: Updates\n    Server--)Client: tools/list_changed\n    Client->>Server: tools/list\n    Server-->>Client: Updated tools\n```\n\n## Data Types\n\n### Tool\n\nA tool definition includes:\n\n* `name`: Unique identifier for the tool\n* `title`: Optional human-readable name of the tool for display purposes.\n* `description`: Human-readable description of functionality\n* `inputSchema`: JSON Schema defining expected parameters\n* `outputSchema`: Optional JSON Schema defining expected output structure\n* `annotations`: optional properties describing tool behavior\n\n<Warning>\n  For trust & safety and security, clients **MUST** consider\n  tool annotations to be untrusted unless they come from trusted servers.\n</Warning>\n\n### Tool Result\n\nTool results may contain [**structured**](#structured-content) or **unstructured** content.\n\n**Unstructured** content is returned in the `content` field of a result, and can contain multiple content items of different types:\n\n#### Text Content\n\n```json\n{\n  \"type\": \"text\",\n  \"text\": \"Tool result text\"\n}\n```\n\n#### Image Content\n\n```json\n{\n  \"type\": \"image\",\n  \"data\": \"base64-encoded-data\",\n  \"mimeType\": \"image/png\"\n}\n```\n\n#### Audio Content\n\n```json\n{\n  \"type\": \"audio\",\n  \"data\": \"base64-encoded-audio-data\",\n  \"mimeType\": \"audio/wav\"\n}\n```\n\n#### Resource Links\n\nA tool **MAY** return links to [Resources](/specification/2025-06-18/server/resources), to provide additional context\nor data. 
In this case, the tool will return a URI that can be subscribed to or fetched by the client:\n\n```json\n{\n  \"type\": \"resource_link\",\n  \"uri\": \"file:///project/src/main.rs\",\n  \"name\": \"main.rs\",\n  \"description\": \"Primary application entry point\",\n  \"mimeType\": \"text/x-rust\"\n}\n```\n\n<Info>\n  Resource links returned by tools are not guaranteed to appear in the results\n  of a `resources/list` request.\n</Info>\n\n#### Embedded Resources\n\n[Resources](/specification/2025-06-18/server/resources) **MAY** be embedded to provide additional context\nor data using a suitable [URI scheme](./resources#common-uri-schemes). Servers that use embedded resources **SHOULD** implement the `resources` capability:\n\n```json\n{\n  \"type\": \"resource\",\n  \"resource\": {\n    \"uri\": \"file:///project/src/main.rs\",\n    \"title\": \"Project Rust Main File\",\n    \"mimeType\": \"text/x-rust\",\n    \"text\": \"fn main() {\\n    println!(\\\"Hello world!\\\");\\n}\"\n  }\n}\n```\n\n#### Structured Content\n\n**Structured** content is returned as a JSON object in the `structuredContent` field of a result.\n\nFor backwards compatibility, a tool that returns structured content SHOULD also return functionally equivalent unstructured content.\n(For example, serialized JSON can be returned in a `TextContent` block.)\n\n#### Output Schema\n\nTools may also provide an output schema for validation of structured results.\nIf an output schema is provided:\n\n* Servers **MUST** provide structured results that conform to this schema.\n* Clients **SHOULD** validate structured results against this schema.\n\nExample tool with output schema:\n\n```json\n{\n  \"name\": \"get_weather_data\",\n  \"title\": \"Weather Data Retriever\",\n  \"description\": \"Get current weather data for a location\",\n  \"inputSchema\": {\n    \"type\": \"object\",\n    \"properties\": {\n      \"location\": {\n        \"type\": \"string\",\n        \"description\": \"City name or zip 
code\"\n      }\n    },\n    \"required\": [\"location\"]\n  },\n  \"outputSchema\": {\n    \"type\": \"object\",\n    \"properties\": {\n      \"temperature\": {\n        \"type\": \"number\",\n        \"description\": \"Temperature in celsius\"\n      },\n      \"conditions\": {\n        \"type\": \"string\",\n        \"description\": \"Weather conditions description\"\n      },\n      \"humidity\": {\n        \"type\": \"number\",\n        \"description\": \"Humidity percentage\"\n      }\n    },\n    \"required\": [\"temperature\", \"conditions\", \"humidity\"]\n  }\n}\n```\n\nExample valid response for this tool:\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 5,\n  \"result\": {\n    \"content\": [\n      {\n        \"type\": \"text\",\n        \"text\": \"{\\\"temperature\\\": 22.5, \\\"conditions\\\": \\\"Partly cloudy\\\", \\\"humidity\\\": 65}\"\n      }\n    ],\n    \"structuredContent\": {\n      \"temperature\": 22.5,\n      \"conditions\": \"Partly cloudy\",\n      \"humidity\": 65\n    }\n  }\n}\n```\n\nProviding an output schema helps clients and LLMs understand and properly handle structured tool outputs by:\n\n* Enabling strict schema validation of responses\n* Providing type information for better integration with programming languages\n* Guiding clients and LLMs to properly parse and utilize the returned data\n* Supporting better documentation and developer experience\n\n## Error Handling\n\nTools use two error reporting mechanisms:\n\n1. **Protocol Errors**: Standard JSON-RPC errors for issues like:\n\n   * Unknown tools\n   * Invalid arguments\n   * Server errors\n\n2. 
**Tool Execution Errors**: Reported in tool results with `isError: true`:\n   * API failures\n   * Invalid input data\n   * Business logic errors\n\nExample protocol error:\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 3,\n  \"error\": {\n    \"code\": -32602,\n    \"message\": \"Unknown tool: invalid_tool_name\"\n  }\n}\n```\n\nExample tool execution error:\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 4,\n  \"result\": {\n    \"content\": [\n      {\n        \"type\": \"text\",\n        \"text\": \"Failed to fetch weather data: API rate limit exceeded\"\n      }\n    ],\n    \"isError\": true\n  }\n}\n```\n\n## Security Considerations\n\n1. Servers **MUST**:\n\n   * Validate all tool inputs\n   * Implement proper access controls\n   * Rate limit tool invocations\n   * Sanitize tool outputs\n\n2. Clients **SHOULD**:\n   * Prompt for user confirmation on sensitive operations\n   * Show tool inputs to the user before calling the server, to avoid malicious or\n     accidental data exfiltration\n   * Validate tool results before passing to LLM\n   * Implement timeouts for tool calls\n   * Log tool usage for audit purposes\n\n\n# Completion\nSource: https://modelcontextprotocol.io/specification/2025-06-18/server/utilities/completion\n\n\n\n<div id=\"enable-section-numbers\" />\n\n<Info>**Protocol Revision**: 2025-06-18</Info>\n\nThe Model Context Protocol (MCP) provides a standardized way for servers to offer\nargument autocompletion suggestions for prompts and resource URIs. 
This enables rich,\nIDE-like experiences where users receive contextual suggestions while entering argument\nvalues.\n\n## User Interaction Model\n\nCompletion in MCP is designed to support interactive user experiences similar to IDE code\ncompletion.\n\nFor example, applications may show completion suggestions in a dropdown or popup menu as\nusers type, with the ability to filter and select from available options.\n\nHowever, implementations are free to expose completion through any interface pattern that\nsuits their needs—the protocol itself does not mandate any specific user\ninteraction model.\n\n## Capabilities\n\nServers that support completions **MUST** declare the `completions` capability:\n\n```json\n{\n  \"capabilities\": {\n    \"completions\": {}\n  }\n}\n```\n\n## Protocol Messages\n\n### Requesting Completions\n\nTo get completion suggestions, clients send a `completion/complete` request specifying\nwhat is being completed through a reference type:\n\n**Request:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"method\": \"completion/complete\",\n  \"params\": {\n    \"ref\": {\n      \"type\": \"ref/prompt\",\n      \"name\": \"code_review\"\n    },\n    \"argument\": {\n      \"name\": \"language\",\n      \"value\": \"py\"\n    }\n  }\n}\n```\n\n**Response:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"result\": {\n    \"completion\": {\n      \"values\": [\"python\", \"pytorch\", \"pyside\"],\n      \"total\": 10,\n      \"hasMore\": true\n    }\n  }\n}\n```\n\nFor prompts or URI templates with multiple arguments, clients should include previous completions in the `context.arguments` object to provide context for subsequent requests.\n\n**Request:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"method\": \"completion/complete\",\n  \"params\": {\n    \"ref\": {\n      \"type\": \"ref/prompt\",\n      \"name\": \"code_review\"\n    },\n    \"argument\": {\n      \"name\": \"framework\",\n      \"value\": 
\"fla\"\n    },\n    \"context\": {\n      \"arguments\": {\n        \"language\": \"python\"\n      }\n    }\n  }\n}\n```\n\n**Response:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"result\": {\n    \"completion\": {\n      \"values\": [\"flask\"],\n      \"total\": 1,\n      \"hasMore\": false\n    }\n  }\n}\n```\n\n### Reference Types\n\nThe protocol supports two types of completion references:\n\n| Type           | Description                 | Example                                             |\n| -------------- | --------------------------- | --------------------------------------------------- |\n| `ref/prompt`   | References a prompt by name | `{\"type\": \"ref/prompt\", \"name\": \"code_review\"}`     |\n| `ref/resource` | References a resource URI   | `{\"type\": \"ref/resource\", \"uri\": \"file:///{path}\"}` |\n\n### Completion Results\n\nServers return an array of completion values ranked by relevance, with:\n\n* Maximum 100 items per response\n* Optional total number of available matches\n* Boolean indicating if additional results exist\n\n## Message Flow\n\n```mermaid\nsequenceDiagram\n    participant Client\n    participant Server\n\n    Note over Client: User types argument\n    Client->>Server: completion/complete\n    Server-->>Client: Completion suggestions\n\n    Note over Client: User continues typing\n    Client->>Server: completion/complete\n    Server-->>Client: Refined suggestions\n```\n\n## Data Types\n\n### CompleteRequest\n\n* `ref`: A `PromptReference` or `ResourceReference`\n* `argument`: Object containing:\n  * `name`: Argument name\n  * `value`: Current value\n* `context`: Object containing:\n  * `arguments`: A mapping of already-resolved argument names to their values.\n\n### CompleteResult\n\n* `completion`: Object containing:\n  * `values`: Array of suggestions (max 100)\n  * `total`: Optional total matches\n  * `hasMore`: Additional results flag\n\n## Error Handling\n\nServers **SHOULD** return standard JSON-RPC 
errors for common failure cases:\n\n* Method not found: `-32601` (Capability not supported)\n* Invalid prompt name: `-32602` (Invalid params)\n* Missing required arguments: `-32602` (Invalid params)\n* Internal errors: `-32603` (Internal error)\n\n## Implementation Considerations\n\n1. Servers **SHOULD**:\n\n   * Return suggestions sorted by relevance\n   * Implement fuzzy matching where appropriate\n   * Rate limit completion requests\n   * Validate all inputs\n\n2. Clients **SHOULD**:\n   * Debounce rapid completion requests\n   * Cache completion results where appropriate\n   * Handle missing or partial results gracefully\n\n## Security\n\nImplementations **MUST**:\n\n* Validate all completion inputs\n* Implement appropriate rate limiting\n* Control access to sensitive suggestions\n* Prevent completion-based information disclosure\n\n\n# Logging\nSource: https://modelcontextprotocol.io/specification/2025-06-18/server/utilities/logging\n\n\n\n<div id=\"enable-section-numbers\" />\n\n<Info>**Protocol Revision**: 2025-06-18</Info>\n\nThe Model Context Protocol (MCP) provides a standardized way for servers to send\nstructured log messages to clients. 
Clients can control logging verbosity by setting\nminimum log levels, with servers sending notifications containing severity levels,\noptional logger names, and arbitrary JSON-serializable data.\n\n## User Interaction Model\n\nImplementations are free to expose logging through any interface pattern that suits their\nneeds—the protocol itself does not mandate any specific user interaction model.\n\n## Capabilities\n\nServers that emit log message notifications **MUST** declare the `logging` capability:\n\n```json\n{\n  \"capabilities\": {\n    \"logging\": {}\n  }\n}\n```\n\n## Log Levels\n\nThe protocol follows the standard syslog severity levels specified in\n[RFC 5424](https://datatracker.ietf.org/doc/html/rfc5424#section-6.2.1):\n\n| Level     | Description                      | Example Use Case           |\n| --------- | -------------------------------- | -------------------------- |\n| debug     | Detailed debugging information   | Function entry/exit points |\n| info      | General informational messages   | Operation progress updates |\n| notice    | Normal but significant events    | Configuration changes      |\n| warning   | Warning conditions               | Deprecated feature usage   |\n| error     | Error conditions                 | Operation failures         |\n| critical  | Critical conditions              | System component failures  |\n| alert     | Action must be taken immediately | Data corruption detected   |\n| emergency | System is unusable               | Complete system failure    |\n\n## Protocol Messages\n\n### Setting Log Level\n\nTo configure the minimum log level, clients **MAY** send a `logging/setLevel` request:\n\n**Request:**\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": 1,\n  \"method\": \"logging/setLevel\",\n  \"params\": {\n    \"level\": \"info\"\n  }\n}\n```\n\n### Log Message Notifications\n\nServers send log messages using `notifications/message` notifications:\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"method\": 
\"notifications/message\",\n  \"params\": {\n    \"level\": \"error\",\n    \"logger\": \"database\",\n    \"data\": {\n      \"error\": \"Connection failed\",\n      \"details\": {\n        \"host\": \"localhost\",\n        \"port\": 5432\n      }\n    }\n  }\n}\n```\n\n## Message Flow\n\n```mermaid\nsequenceDiagram\n    participant Client\n    participant Server\n\n    Note over Client,Server: Configure Logging\n    Client->>Server: logging/setLevel (info)\n    Server-->>Client: Empty Result\n\n    Note over Client,Server: Server Activity\n    Server--)Client: notifications/message (info)\n    Server--)Client: notifications/message (warning)\n    Server--)Client: notifications/message (error)\n\n    Note over Client,Server: Level Change\n    Client->>Server: logging/setLevel (error)\n    Server-->>Client: Empty Result\n    Note over Server: Only sends error level<br/>and above\n```\n\n## Error Handling\n\nServers **SHOULD** return standard JSON-RPC errors for common failure cases:\n\n* Invalid log level: `-32602` (Invalid params)\n* Configuration errors: `-32603` (Internal error)\n\n## Implementation Considerations\n\n1. Servers **SHOULD**:\n\n   * Rate limit log messages\n   * Include relevant context in data field\n   * Use consistent logger names\n   * Remove sensitive information\n\n2. Clients **MAY**:\n   * Present log messages in the UI\n   * Implement log filtering/search\n   * Display severity visually\n   * Persist log messages\n\n## Security\n\n1. Log messages **MUST NOT** contain:\n\n   * Credentials or secrets\n   * Personal identifying information\n   * Internal system details that could aid attacks\n\n2. 
Implementations **SHOULD**:\n   * Rate limit messages\n   * Validate all data fields\n   * Control log access\n   * Monitor for sensitive content\n\n\n# Pagination\nSource: https://modelcontextprotocol.io/specification/2025-06-18/server/utilities/pagination\n\n\n\n<div id=\"enable-section-numbers\" />\n\n<Info>**Protocol Revision**: 2025-06-18</Info>\n\nThe Model Context Protocol (MCP) supports paginating list operations that may return\nlarge result sets. Pagination allows servers to yield results in smaller chunks rather\nthan all at once.\n\nPagination is especially important when connecting to external services over the\ninternet, but also useful for local integrations to avoid performance issues with large\ndata sets.\n\n## Pagination Model\n\nPagination in MCP uses an opaque cursor-based approach, instead of numbered pages.\n\n* The **cursor** is an opaque string token, representing a position in the result set\n* **Page size** is determined by the server, and clients **MUST NOT** assume a fixed page\n  size\n\n## Response Format\n\nPagination starts when the server sends a **response** that includes:\n\n* The current page of results\n* An optional `nextCursor` field if more results exist\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"id\": \"123\",\n  \"result\": {\n    \"resources\": [...],\n    \"nextCursor\": \"eyJwYWdlIjogM30=\"\n  }\n}\n```\n\n## Request Format\n\nAfter receiving a cursor, the client can *continue* paginating by issuing a request\nincluding that cursor:\n\n```json\n{\n  \"jsonrpc\": \"2.0\",\n  \"method\": \"resources/list\",\n  \"params\": {\n    \"cursor\": \"eyJwYWdlIjogMn0=\"\n  }\n}\n```\n\n## Pagination Flow\n\n```mermaid\nsequenceDiagram\n    participant Client\n    participant Server\n\n    Client->>Server: List Request (no cursor)\n    loop Pagination Loop\n      Server-->>Client: Page of results + nextCursor\n      Client->>Server: List Request (with cursor)\n    end\n```\n\n## Operations Supporting Pagination\n\nThe following 
MCP operations support pagination:\n\n* `resources/list` - List available resources\n* `resources/templates/list` - List resource templates\n* `prompts/list` - List available prompts\n* `tools/list` - List available tools\n\n## Implementation Guidelines\n\n1. Servers **SHOULD**:\n\n   * Provide stable cursors\n   * Handle invalid cursors gracefully\n\n2. Clients **SHOULD**:\n\n   * Treat a missing `nextCursor` as the end of results\n   * Support both paginated and non-paginated flows\n\n3. Clients **MUST** treat cursors as opaque tokens:\n   * Don't make assumptions about cursor format\n   * Don't attempt to parse or modify cursors\n   * Don't persist cursors across sessions\n\n## Error Handling\n\nInvalid cursors **SHOULD** result in an error with code -32602 (Invalid params).\n\n\n# Versioning\nSource: https://modelcontextprotocol.io/specification/versioning\n\n\n\nThe Model Context Protocol uses string-based version identifiers following the format\n`YYYY-MM-DD`, to indicate the last date backwards incompatible changes were made.\n\n<Info>\n  The protocol version will *not* be incremented when the\n  protocol is updated, as long as the changes maintain backwards compatibility. This allows\n  for incremental improvements while preserving interoperability.\n</Info>\n\n## Revisions\n\nRevisions may be marked as:\n\n* **Draft**: in-progress specifications, not yet ready for consumption.\n* **Current**: the current protocol version, which is ready for use and may continue to\n  receive backwards compatible changes.\n* **Final**: past, complete specifications that will not be changed.\n\nThe **current** protocol version is [**2025-06-18**](/specification/2025-06-18/).\n\n## Negotiation\n\nVersion negotiation happens during\n[initialization](/specification/2025-06-18/basic/lifecycle#initialization). 
Clients and\nservers **MAY** support multiple protocol versions simultaneously, but they **MUST**\nagree on a single version to use for the session.\n\nThe protocol provides appropriate error handling if version negotiation fails, allowing\nclients to gracefully terminate connections when they cannot find a version compatible\nwith the server.\n\n\n# Building MCP with LLMs\nSource: https://modelcontextprotocol.io/tutorials/building-mcp-with-llms\n\nSpeed up your MCP development using LLMs such as Claude!\n\nThis guide will help you use LLMs to help you build custom Model Context Protocol (MCP) servers and clients. We'll be focusing on Claude for this tutorial, but you can do this with any frontier LLM.\n\n## Preparing the documentation\n\nBefore starting, gather the necessary documentation to help Claude understand MCP:\n\n1. Visit [https://modelcontextprotocol.io/llms-full.txt](https://modelcontextprotocol.io/llms-full.txt) and copy the full documentation text\n2. Navigate to either the [MCP TypeScript SDK](https://github.com/modelcontextprotocol/typescript-sdk) or [Python SDK repository](https://github.com/modelcontextprotocol/python-sdk)\n3. Copy the README files and other relevant documentation\n4. Paste these documents into your conversation with Claude\n\n## Describing your server\n\nOnce you've provided the documentation, clearly describe to Claude what kind of server you want to build. Be specific about:\n\n* What resources your server will expose\n* What tools it will provide\n* Any prompts it should offer\n* What external systems it needs to interact with\n\nFor example:\n\n```\nBuild an MCP server that:\n- Connects to my company's PostgreSQL database\n- Exposes table schemas as resources\n- Provides tools for running read-only SQL queries\n- Includes prompts for common data analysis tasks\n```\n\n## Working with Claude\n\nWhen working with Claude on MCP servers:\n\n1. Start with the core functionality first, then iterate to add more features\n2. 
Ask Claude to explain any parts of the code you don't understand\n3. Request modifications or improvements as needed\n4. Have Claude help you test the server and handle edge cases\n\nClaude can help implement all the key MCP features:\n\n* Resource management and exposure\n* Tool definitions and implementations\n* Prompt templates and handlers\n* Error handling and logging\n* Connection and transport setup\n\n## Best practices\n\nWhen building MCP servers with Claude:\n\n* Break down complex servers into smaller pieces\n* Test each component thoroughly before moving on\n* Keep security in mind - validate inputs and limit access appropriately\n* Document your code well for future maintenance\n* Follow MCP protocol specifications carefully\n\n## Next steps\n\nAfter Claude helps you build your server:\n\n1. Review the generated code carefully\n2. Test the server with the MCP Inspector tool\n3. Connect it to Claude.app or other MCP clients\n4. Iterate based on real usage and feedback\n\nRemember that Claude can help you modify and improve your server as requirements change over time.\n\nNeed more guidance? Just ask Claude specific questions about implementing MCP features or troubleshooting issues that arise.\n\n"
  },
  {
    "path": "package.json",
    "content": "{\n  \"name\": \"exa-mcp-server\",\n  \"version\": \"3.1.9\",\n  \"description\": \"A Model Context Protocol server with Exa for web search and web crawling. Provides real-time web searches with configurable tool selection, allowing users to enable or disable specific search capabilities. Supports customizable result counts, live crawling options, and returns content from the most relevant websites.\",\n  \"mcpName\": \"io.github.exa-labs/exa-mcp-server\",\n  \"type\": \"module\",\n  \"module\": \"./src/index.ts\",\n  \"repository\": {\n    \"type\": \"git\",\n    \"url\": \"git+https://github.com/exa-labs/exa-mcp-server.git\"\n  },\n  \"bin\": {\n    \"exa-mcp-server\": \".smithery/stdio/index.cjs\"\n  },\n  \"files\": [\n    \".smithery\"\n  ],\n  \"keywords\": [\n    \"mcp\",\n    \"search mcp\",\n    \"model context protocol\",\n    \"exa\",\n    \"search\",\n    \"websearch\",\n    \"claude\",\n    \"ai\",\n    \"research\",\n    \"papers\",\n    \"people\"\n  ],\n  \"author\": \"Exa Labs\",\n  \"scripts\": {\n    \"build\": \"npm run build:shttp && npm run build:stdio\",\n    \"build:stdio\": \"smithery build src/index.ts --transport stdio -o .smithery/stdio/index.cjs && echo '#!/usr/bin/env node' | cat - .smithery/stdio/index.cjs > temp && mv temp .smithery/stdio/index.cjs && chmod +x .smithery/stdio/index.cjs\",\n    \"build:shttp\": \"smithery build src/index.ts --transport shttp -o .smithery/shttp/index.cjs\",\n    \"build:vercel\": \"npm install typescript && ./node_modules/.bin/tsc\",\n    \"prepare\": \"npm run build:stdio\",\n    \"watch\": \"./node_modules/.bin/tsc --watch\",\n    \"dev\": \"npx @smithery/cli@latest dev\",\n    \"dev:vercel\": \"vercel dev\",\n    \"inspector\": \"npx @modelcontextprotocol/inspector build/index.js\",\n    \"prepublishOnly\": \"npm run build:stdio\"\n  },\n  \"dependencies\": {\n    \"@modelcontextprotocol/sdk\": \"^1.12.1\",\n    \"agnost\": \"^0.1.10\",\n    \"axios\": \"^1.7.8\",\n    
\"mcp-handler\": \"^1.0.4\",\n    \"whoami\": \"^0.0.3\",\n    \"zod\": \"^3.22.4\"\n  },\n  \"devDependencies\": {\n    \"@smithery/cli\": \"^1.4.4\",\n    \"@types/node\": \"^20.11.24\",\n    \"@upstash/ratelimit\": \"^2.0.8\",\n    \"@upstash/redis\": \"^1.36.1\",\n    \"tsx\": \"^4.7.0\",\n    \"typescript\": \"^5.9.3\",\n    \"vercel\": \"^37.0.0\"\n  },\n  \"engines\": {\n    \"node\": \">=18.0.0\"\n  },\n  \"overrides\": {\n    \"tar\": \"^7.5.7\"\n  }\n}\n"
  },
  {
    "path": "server.json",
    "content": "{\n  \"$schema\": \"https://static.modelcontextprotocol.io/schemas/2025-07-09/server.schema.json\",\n  \"name\": \"io.github.exa-labs/exa-mcp-server\",\n  \"description\": \"MCP server with Exa for web search and web crawling. Exa is the search engine for AI Applications.\",\n  \"version\": \"3.1.9\",\n  \"packages\": [\n    {\n      \"registryType\": \"npm\",\n      \"identifier\": \"exa-mcp-server\",\n      \"version\": \"3.1.9\"\n    }\n  ],\n  \"remotes\": [\n    {\n      \"type\": \"sse\",\n      \"url\": \"https://mcp.exa.ai/mcp?tools=web_search_exa,web_search_advanced_exa,get_code_context_exa,crawling_exa\",\n      \"description\": \"Hosted Exa MCP server with web search and web crawling capabilities. Get the API key from https://dashboard.exa.ai/api-keys. Customize the tools parameter to enable only specific tools (comma-separated list).\"\n    }\n  ]\n}\n"
  },
  {
    "path": "skills/code-search/SKILL.md",
    "content": "---\nname: get-code-context-exa\ndescription: Code context using Exa. Finds real snippets and docs from GitHub, StackOverflow, and technical docs. Use when searching for code examples, API syntax, library documentation, or debugging help.\ncontext: fork\n---\n\n# Code Context (Exa)\n\n## Tool Restriction (Critical)\n\nONLY use `get_code_context_exa`. Do NOT use other Exa tools.\n\n## Token Isolation (Critical)\n\nNever run Exa in main context. Always spawn Task agents:\n- Agent calls `get_code_context_exa`\n- Agent extracts the minimum viable snippet(s) + constraints\n- Agent deduplicates near-identical results (mirrors, forks, repeated StackOverflow answers) before presenting\n- Agent returns copyable snippets + brief explanation\n- Main context stays clean regardless of search volume\n\n## When to Use\n\nUse this tool for ANY programming-related request:\n- API usage and syntax\n- SDK/library examples\n- config and setup patterns\n- framework \"how to\" questions\n- debugging when you need authoritative snippets\n\n## Inputs (Supported)\n\n`get_code_context_exa` supports:\n- `query` (string, required)\n- `tokensNum` (number, optional; default ~5000; typical range 1000–50000)\n\n## Query Writing Patterns (High Signal)\n\nTo reduce irrelevant results and cross-language noise:\n- Always include the **programming language** in the query.\n  - Example: use **\"Go generics\"** instead of just **\"generics\"**.\n- When applicable, also include **framework + version** (e.g., \"Next.js 14\", \"React 19\", \"Python 3.12\").\n- Include exact identifiers (function/class names, config keys, error messages) when you have them.\n\n## Dynamic Tuning\n\nToken strategy:\n- Focused snippet needed → tokensNum 1000–3000\n- Most tasks → tokensNum 5000\n- Complex integration → tokensNum 10000–20000\n- Only go larger when necessary (avoid dumping large context)\n\n## Output Format (Recommended)\n\nReturn:\n1) Best minimal working snippet(s) (keep it copy/paste 
friendly)\n2) Notes on version / constraints / gotchas\n3) Sources (URLs if present in returned context)\n\nBefore presenting:\n- Deduplicate similar results and keep only the best representative snippet per approach.\n\n## MCP Configuration\n\n```json\n{\n  \"servers\": {\n    \"exa\": {\n      \"type\": \"http\",\n      \"url\": \"https://mcp.exa.ai/mcp?tools=get_code_context_exa\"\n    }\n  }\n}\n```\n"
  },
  {
    "path": "skills/company-search/SKILL.md",
    "content": "---\nname: company-research\ndescription: Company research using Exa search. Finds company info, competitors, news, tweets, financials, LinkedIn profiles, builds company lists. Use when researching companies, doing competitor analysis, market research, or building company lists.\ncontext: fork\n---\n\n# Company Research\n\n## Tool Restriction (Critical)\n\nONLY use `web_search_advanced_exa`. Do NOT use `web_search_exa` or any other Exa tools.\n\n## Token Isolation (Critical)\n\nNever run Exa searches in main context. Always spawn Task agents:\n- Agent runs Exa search internally\n- Agent processes results using LLM intelligence\n- Agent returns only distilled output (compact JSON or brief markdown)\n- Main context stays clean regardless of search volume\n\n## Dynamic Tuning\n\nNo hardcoded numResults. Tune to user intent:\n- User says \"a few\" → 10-20\n- User says \"comprehensive\" → 50-100\n- User specifies number → match it\n- Ambiguous? Ask: \"How many companies would you like?\"\n\n## Query Variation\n\nExa returns different results for different phrasings. 
For coverage:\n- Generate 2-3 query variations\n- Run in parallel\n- Merge and deduplicate\n\n## Categories\n\nUse appropriate Exa `category` depending on what you need:\n- `company` → homepages, rich metadata (headcount, location, funding, revenue)\n- `news` → press coverage, announcements\n- `tweet` → social presence, public commentary\n- `people` → LinkedIn profiles (public data)\n- No category (`type: \"auto\"`) → general web results, deep dives, broader context\n\nStart with `category: \"company\"` for discovery, then use other categories or no category with `livecrawl: \"fallback\"` for deeper research.\n\n### Category-Specific Filter Restrictions\n\nWhen using `category: \"company\"`, these parameters cause 400 errors:\n- `includeDomains` / `excludeDomains`\n- `startPublishedDate` / `endPublishedDate`\n- `startCrawlDate` / `endCrawlDate`\n\nWhen searching without a category (or with `news`), domain and date filters work fine.\n\n**Universal restriction:** `includeText` and `excludeText` only support **single-item arrays**. 
Multi-item arrays cause 400 errors across all categories.\n\n## LinkedIn\n\nPublic LinkedIn via Exa: `category: \"people\"`, no other filters.\nAuth-required LinkedIn → use Claude in Chrome browser fallback.\n\n## Browser Fallback\n\nAuto-fallback to Claude in Chrome when:\n- Exa returns insufficient results\n- Content is auth-gated\n- Dynamic pages need JavaScript\n\n## Examples\n\n### Discovery: find companies in a space\n```\nweb_search_advanced_exa {\n  \"query\": \"AI infrastructure startups San Francisco\",\n  \"category\": \"company\",\n  \"numResults\": 20,\n  \"type\": \"auto\"\n}\n```\n\n### Deep dive: research a specific company\n```\nweb_search_advanced_exa {\n  \"query\": \"Anthropic funding rounds valuation 2024\",\n  \"type\": \"deep\",\n  \"livecrawl\": \"fallback\",\n  \"numResults\": 10,\n  \"includeDomains\": [\"techcrunch.com\", \"crunchbase.com\", \"bloomberg.com\"]\n}\n```\n\n### News coverage\n```\nweb_search_advanced_exa {\n  \"query\": \"Anthropic AI safety\",\n  \"category\": \"news\",\n  \"numResults\": 15,\n  \"startPublishedDate\": \"2024-01-01\"\n}\n```\n\n### LinkedIn profiles\n```\nweb_search_advanced_exa {\n  \"query\": \"VP Engineering AI infrastructure\",\n  \"category\": \"people\",\n  \"numResults\": 20\n}\n```\n\n## Output Format\n\nReturn:\n1) Results (structured list; one company per row)\n2) Sources (URLs; 1-line relevance each)\n3) Notes (uncertainty/conflicts)\n"
  },
  {
    "path": "skills/financial-report-search/SKILL.md",
    "content": "---\nname: web-search-advanced-financial-report\ndescription: Search for financial reports using Exa advanced search. Near-full filter support for finding SEC filings, earnings reports, and financial documents. Use when searching for 10-K filings, quarterly earnings, or annual reports.\ncontext: fork\n---\n\n# Web Search Advanced - Financial Report Category\n\n## Tool Restriction (Critical)\n\nONLY use `web_search_advanced_exa` with `category: \"financial report\"`. Do NOT use other categories or tools.\n\n## Filter Restrictions (Critical)\n\nThe `financial report` category has one known restriction:\n\n- `excludeText` - NOT SUPPORTED (causes 400 error)\n\n## Supported Parameters\n\n### Core\n- `query` (required)\n- `numResults`\n- `type` (\"auto\", \"fast\", \"deep\", \"neural\")\n\n### Domain filtering\n- `includeDomains` (e.g., [\"sec.gov\", \"investor.apple.com\"])\n- `excludeDomains`\n\n### Date filtering (ISO 8601) - Very useful for financial reports!\n- `startPublishedDate` / `endPublishedDate`\n- `startCrawlDate` / `endCrawlDate`\n\n### Text filtering\n- `includeText` (must contain ALL) - **single-item arrays only**; multi-item causes 400\n- ~~`excludeText`~~ - NOT SUPPORTED\n\n### Content extraction\n- `textMaxCharacters` / `contextMaxCharacters`\n- `enableSummary` / `summaryQuery`\n- `enableHighlights` / `highlightsNumSentences` / `highlightsPerUrl` / `highlightsQuery`\n\n### Additional\n- `additionalQueries`\n- `livecrawl` / `livecrawlTimeout`\n- `subpages` / `subpageTarget`\n\n## Token Isolation (Critical)\n\nNever run Exa searches in main context. 
Always spawn Task agents:\n- Agent calls `web_search_advanced_exa` with `category: \"financial report\"`\n- Agent merges + deduplicates results before presenting\n- Agent returns distilled output (brief markdown or compact JSON)\n- Main context stays clean regardless of search volume\n\n## When to Use\n\nUse this category when you need:\n- SEC filings (10-K, 10-Q, 8-K, S-1)\n- Quarterly earnings reports\n- Annual reports\n- Investor presentations\n- Financial statements\n\n## Examples\n\nSEC filings for a company:\n```\nweb_search_advanced_exa {\n  \"query\": \"Anthropic SEC filing S-1\",\n  \"category\": \"financial report\",\n  \"numResults\": 10,\n  \"type\": \"auto\"\n}\n```\n\nRecent earnings reports:\n```\nweb_search_advanced_exa {\n  \"query\": \"Q4 2025 earnings report technology\",\n  \"category\": \"financial report\",\n  \"startPublishedDate\": \"2025-10-01\",\n  \"numResults\": 20,\n  \"type\": \"auto\"\n}\n```\n\nSpecific filing type:\n```\nweb_search_advanced_exa {\n  \"query\": \"10-K annual report AI companies\",\n  \"category\": \"financial report\",\n  \"includeDomains\": [\"sec.gov\"],\n  \"startPublishedDate\": \"2025-01-01\",\n  \"numResults\": 15,\n  \"type\": \"deep\"\n}\n```\n\nRisk factors analysis:\n```\nweb_search_advanced_exa {\n  \"query\": \"risk factors cybersecurity\",\n  \"category\": \"financial report\",\n  \"includeText\": [\"cybersecurity\"],\n  \"numResults\": 10,\n  \"enableHighlights\": true,\n  \"highlightsQuery\": \"What are the main cybersecurity risks?\"\n}\n```\n\n## Output Format\n\nReturn:\n1) Results (company name, filing type, date, key figures/highlights)\n2) Sources (Filing URLs)\n3) Notes (reporting period, any restatements, auditor notes)\n"
  },
  {
    "path": "skills/people-search/SKILL.md",
    "content": "---\nname: people-research\ndescription: People research using Exa search. Finds LinkedIn profiles, professional backgrounds, experts, team members, and public bios across the web. Use when searching for people, finding experts, or looking up professional profiles.\ncontext: fork\n---\n\n# People Research\n\n## Tool Restriction (Critical)\n\nONLY use `web_search_advanced_exa`. Do NOT use `web_search_exa` or any other Exa tools.\n\n## Token Isolation (Critical)\n\nNever run Exa searches in main context. Always spawn Task agents:\n- Agent runs Exa search internally\n- Agent processes results using LLM intelligence\n- Agent returns only distilled output (compact JSON or brief markdown)\n- Main context stays clean regardless of search volume\n\n## Dynamic Tuning\n\nNo hardcoded numResults. Tune to user intent:\n- User says \"a few\" → 10-20\n- User says \"comprehensive\" → 50-100\n- User specifies number → match it\n- Ambiguous? Ask: \"How many profiles would you like?\"\n\n## Query Variation\n\nExa returns different results for different phrasings. 
For coverage:\n- Generate 2-3 query variations\n- Run in parallel\n- Merge and deduplicate\n\n## Categories\n\nUse appropriate Exa `category` depending on what you need:\n- `people` → LinkedIn profiles, public bios (primary for discovery)\n- `personal site` → personal blogs, portfolio sites, about pages\n- `news` → press mentions, interviews, speaker bios\n- No category (`type: \"auto\"`) → general web results, broader context\n\nStart with `category: \"people\"` for profile discovery, then use other categories or no category with `livecrawl: \"fallback\"` for deeper research on specific individuals.\n\n### Category-Specific Filter Restrictions\n\nWhen using `category: \"people\"`, these parameters cause errors:\n- `startPublishedDate` / `endPublishedDate`\n- `startCrawlDate` / `endCrawlDate`\n- `includeText` / `excludeText`\n- `excludeDomains`\n- `includeDomains` — **LinkedIn domains only** (e.g., \"linkedin.com\")\n\nWhen searching without a category, all parameters are available (but `includeText`/`excludeText` still only support single-item arrays).\n\n## LinkedIn\n\nPublic LinkedIn via Exa: `category: \"people\"`, no other filters.\nAuth-required LinkedIn → use Claude in Chrome browser fallback.\n\n## Browser Fallback\n\nAuto-fallback to Claude in Chrome when:\n- Exa returns insufficient results\n- Content is auth-gated\n- Dynamic pages need JavaScript\n\n## Examples\n\n### Discovery: find people by role\n```\nweb_search_advanced_exa {\n  \"query\": \"VP Engineering AI infrastructure\",\n  \"category\": \"people\",\n  \"numResults\": 20,\n  \"type\": \"auto\"\n}\n```\n\n### With query variations\n```\nweb_search_advanced_exa {\n  \"query\": \"machine learning engineer San Francisco\",\n  \"category\": \"people\",\n  \"additionalQueries\": [\"ML engineer SF\", \"AI engineer Bay Area\"],\n  \"numResults\": 25,\n  \"type\": \"deep\"\n}\n```\n\n### Deep dive: research a specific person\n```\nweb_search_advanced_exa {\n  \"query\": \"Dario Amodei Anthropic CEO 
background\",\n  \"type\": \"auto\",\n  \"livecrawl\": \"fallback\",\n  \"numResults\": 15\n}\n```\n\n### News mentions\n```\nweb_search_advanced_exa {\n  \"query\": \"Dario Amodei interview\",\n  \"category\": \"news\",\n  \"numResults\": 10,\n  \"startPublishedDate\": \"2024-01-01\"\n}\n```\n\n## Output Format\n\nReturn:\n1) Results (name, title, company, location if available)\n2) Sources (Profile URLs)\n3) Notes (profile completeness, verification status)\n"
  },
  {
    "path": "skills/personal-site-search/SKILL.md",
    "content": "---\nname: web-search-advanced-personal-site\ndescription: Search personal websites and blogs using Exa advanced search. Full filter support for finding individual perspectives, portfolios, and personal blogs. Use when searching for personal sites, blog posts, or portfolio websites.\ncontext: fork\n---\n\n# Web Search Advanced - Personal Site Category\n\n## Tool Restriction (Critical)\n\nONLY use `web_search_advanced_exa` with `category: \"personal site\"`. Do NOT use other categories or tools.\n\n## Full Filter Support\n\nThe `personal site` category supports ALL available parameters:\n\n### Core\n- `query` (required)\n- `numResults`\n- `type` (\"auto\", \"fast\", \"deep\", \"neural\")\n\n### Domain filtering\n- `includeDomains`\n- `excludeDomains` (e.g., exclude Medium if you want independent blogs)\n\n### Date filtering (ISO 8601)\n- `startPublishedDate` / `endPublishedDate`\n- `startCrawlDate` / `endCrawlDate`\n\n### Text filtering\n- `includeText` (must contain ALL)\n- `excludeText` (exclude if ANY match)\n\n**Array size restriction:** `includeText` and `excludeText` only support **single-item arrays**. Multi-item arrays (2+ items) cause 400 errors. To match multiple terms, put them in the `query` string or run separate searches.\n\n### Content extraction\n- `textMaxCharacters` / `contextMaxCharacters`\n- `enableSummary` / `summaryQuery`\n- `enableHighlights` / `highlightsNumSentences` / `highlightsPerUrl` / `highlightsQuery`\n\n### Additional\n- `additionalQueries`\n- `livecrawl` / `livecrawlTimeout`\n- `subpages` / `subpageTarget` - useful for exploring portfolio sites\n\n## Token Isolation (Critical)\n\nNever run Exa searches in main context. 
Always spawn Task agents:\n- Agent calls `web_search_advanced_exa` with `category: \"personal site\"`\n- Agent merges + deduplicates results before presenting\n- Agent returns distilled output (brief markdown or compact JSON)\n- Main context stays clean regardless of search volume\n\n## When to Use\n\nUse this category when you need:\n- Individual expert opinions and experiences\n- Personal blog posts on technical topics\n- Portfolio websites\n- Independent analysis (not corporate content)\n- Deep dives and tutorials from practitioners\n\n## Examples\n\nTechnical blog posts:\n```\nweb_search_advanced_exa {\n  \"query\": \"building production LLM applications lessons learned\",\n  \"category\": \"personal site\",\n  \"numResults\": 15,\n  \"type\": \"deep\",\n  \"enableSummary\": true\n}\n```\n\nRecent posts on a topic:\n```\nweb_search_advanced_exa {\n  \"query\": \"Rust async runtime comparison\",\n  \"category\": \"personal site\",\n  \"startPublishedDate\": \"2025-01-01\",\n  \"numResults\": 10,\n  \"type\": \"auto\"\n}\n```\n\nExclude aggregators:\n```\nweb_search_advanced_exa {\n  \"query\": \"startup founder lessons\",\n  \"category\": \"personal site\",\n  \"excludeDomains\": [\"medium.com\", \"substack.com\"],\n  \"numResults\": 15,\n  \"type\": \"auto\"\n}\n```\n\n## Output Format\n\nReturn:\n1) Results (title, author/site name, date, key insights)\n2) Sources (URLs)\n3) Notes (author expertise, potential biases, depth of coverage)\n"
  },
  {
    "path": "skills/research-paper-search/SKILL.md",
    "content": "---\nname: web-search-advanced-research-paper\ndescription: Search for research papers and academic content using Exa advanced search. Full filter support including date ranges and text filtering. Use when searching for academic papers, arXiv preprints, or scientific research.\ncontext: fork\n---\n\n# Web Search Advanced - Research Paper Category\n\n## Tool Restriction (Critical)\n\nONLY use `web_search_advanced_exa` with `category: \"research paper\"`. Do NOT use other categories or tools.\n\n## Full Filter Support\n\nThe `research paper` category supports ALL available parameters:\n\n### Core\n- `query` (required)\n- `numResults`\n- `type` (\"auto\", \"fast\", \"deep\", \"neural\")\n\n### Domain filtering\n- `includeDomains` (e.g., [\"arxiv.org\", \"openreview.net\"])\n- `excludeDomains`\n\n### Date filtering (ISO 8601)\n- `startPublishedDate` / `endPublishedDate`\n- `startCrawlDate` / `endCrawlDate`\n\n### Text filtering\n- `includeText` (must contain ALL)\n- `excludeText` (exclude if ANY match)\n\n**Array size restriction:** `includeText` and `excludeText` only support **single-item arrays**. Multi-item arrays (2+ items) cause 400 errors. To match multiple terms, put them in the `query` string or run separate searches.\n\n### Content extraction\n- `textMaxCharacters` / `contextMaxCharacters`\n- `enableSummary` / `summaryQuery`\n- `enableHighlights` / `highlightsNumSentences` / `highlightsPerUrl` / `highlightsQuery`\n\n### Additional\n- `userLocation`\n- `moderation`\n- `additionalQueries`\n- `livecrawl` / `livecrawlTimeout`\n- `subpages` / `subpageTarget`\n\n## Token Isolation (Critical)\n\nNever run Exa searches in main context. 
Always spawn Task agents:\n- Agent calls `web_search_advanced_exa` with `category: \"research paper\"`\n- Agent merges + deduplicates results before presenting\n- Agent returns distilled output (brief markdown or compact JSON)\n- Main context stays clean regardless of search volume\n\n## When to Use\n\nUse this category when you need:\n- Academic papers from arXiv, OpenReview, PubMed, etc.\n- Scientific research on specific topics\n- Literature reviews with date filtering\n- Papers containing specific methodologies or terms\n\n## Examples\n\nRecent papers on a topic:\n```\nweb_search_advanced_exa {\n  \"query\": \"transformer attention mechanisms efficiency\",\n  \"category\": \"research paper\",\n  \"startPublishedDate\": \"2024-01-01\",\n  \"numResults\": 15,\n  \"type\": \"auto\"\n}\n```\n\nPapers from specific venues:\n```\nweb_search_advanced_exa {\n  \"query\": \"large language model agents\",\n  \"category\": \"research paper\",\n  \"includeDomains\": [\"arxiv.org\", \"openreview.net\"],\n  \"includeText\": [\"LLM\"],\n  \"numResults\": 20,\n  \"type\": \"deep\"\n}\n```\n\n## Output Format\n\nReturn:\n1) Results (structured list with title, authors, date, abstract summary)\n2) Sources (URLs with publication venue)\n3) Notes (methodology differences, conflicting findings)\n"
  },
  {
    "path": "skills/x-search/SKILL.md",
    "content": "---\nname: web-search-advanced-tweet\ndescription: Search tweets and Twitter/X content using Exa advanced search. Limited filter support - text and domain filters are NOT supported. Use when searching for tweets, Twitter/X discussions, or social media sentiment.\ncontext: fork\n---\n\n# Web Search Advanced - Tweet Category\n\n## Tool Restriction (Critical)\n\nONLY use `web_search_advanced_exa` with `category: \"tweet\"`. Do NOT use other categories or tools.\n\n## Filter Restrictions (Critical)\n\nThe `tweet` category has **LIMITED filter support**. The following parameters are **NOT supported** and will cause 400 errors:\n\n- `includeText` - NOT SUPPORTED\n- `excludeText` - NOT SUPPORTED\n- `includeDomains` - NOT SUPPORTED\n- `excludeDomains` - NOT SUPPORTED\n- `moderation` - NOT SUPPORTED (causes 500 server error)\n\n## Supported Parameters\n\n### Core\n- `query` (required)\n- `numResults`\n- `type` (\"auto\", \"fast\", \"deep\", \"neural\")\n\n### Date filtering (ISO 8601) - Use these instead of text filters!\n- `startPublishedDate` / `endPublishedDate`\n- `startCrawlDate` / `endCrawlDate`\n\n### Content extraction\n- `textMaxCharacters` / `contextMaxCharacters`\n- `enableHighlights` / `highlightsNumSentences` / `highlightsPerUrl` / `highlightsQuery`\n- `enableSummary` / `summaryQuery`\n\n### Additional\n- `additionalQueries` - useful for hashtag variations\n- `livecrawl` / `livecrawlTimeout` - use \"preferred\" for recent tweets\n\n## Token Isolation (Critical)\n\nNever run Exa searches in main context. 
Always spawn Task agents:\n- Agent calls `web_search_advanced_exa` with `category: \"tweet\"`\n- Agent merges + deduplicates results before presenting\n- Agent returns distilled output (brief markdown or compact JSON)\n- Main context stays clean regardless of search volume\n\n## When to Use\n\nUse this category when you need:\n- Social discussions on a topic\n- Product announcements from company accounts\n- Developer opinions and experiences\n- Trending topics and community sentiment\n- Expert takes and threads\n\n## Examples\n\nRecent tweets on a topic:\n```\nweb_search_advanced_exa {\n  \"query\": \"Claude Code MCP experience\",\n  \"category\": \"tweet\",\n  \"startPublishedDate\": \"2025-01-01\",\n  \"numResults\": 20,\n  \"type\": \"auto\",\n  \"livecrawl\": \"preferred\"\n}\n```\n\nSearch with specific keywords (put keywords in query, not includeText):\n```\nweb_search_advanced_exa {\n  \"query\": \"launching announcing new open source release\",\n  \"category\": \"tweet\",\n  \"startPublishedDate\": \"2025-12-01\",\n  \"numResults\": 15,\n  \"type\": \"auto\"\n}\n```\n\nDeveloper sentiment (use specific query terms instead of excludeText):\n```\nweb_search_advanced_exa {\n  \"query\": \"developer experience DX frustrating painful\",\n  \"category\": \"tweet\",\n  \"numResults\": 20,\n  \"type\": \"deep\",\n  \"livecrawl\": \"preferred\"\n}\n```\n\n## Output Format\n\nReturn:\n1) Results (tweet content, author handle, date, engagement if visible)\n2) Sources (Tweet URLs)\n3) Notes (sentiment summary, notable accounts, threads vs single tweets)\n\nImportant: Be aware that tweet content can be informal, sarcastic, or context-dependent.\n"
  },
  {
    "path": "smithery-example.json",
    "content": "{\n  \"exaApiKey\": \"your-exa-api-key-here\",\n  \"enabledTools\": [\n    \"web_search_exa\",\n    \"web_search_advanced_exa\",\n    \"get_code_context_exa\",\n    \"crawling_exa\"\n  ],\n  \"debug\": false\n}"
  },
  {
    "path": "smithery.yaml",
    "content": "runtime: typescript "
  },
  {
    "path": "src/index.ts",
    "content": "#!/usr/bin/env node\nprocess.env.AGNOST_LOG_LEVEL = 'error';\n\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { z } from \"zod\";\nimport { log } from \"./utils/logger.js\";\nimport { initializeMcpServer } from \"./mcp-handler.js\";\n\n// Configuration schema for the EXA API key and tool selection\nexport const configSchema = z.object({\n  exaApiKey: z.string().optional().describe(\"Exa AI API key for search operations\"),\n  enabledTools: z.union([\n    z.array(z.string()),\n    z.string()\n  ]).optional().describe(\"List of tools to enable (comma-separated string or array)\"),\n  tools: z.union([\n    z.array(z.string()),\n    z.string()\n  ]).optional().describe(\"List of tools to enable (comma-separated string or array) - alias for enabledTools\"),\n  debug: z.boolean().default(false).describe(\"Enable debug logging\")\n});\n\n// Export stateless flag for MCP\nexport const stateless = true;\n\n/**\n * Exa AI Web Search MCP Server\n * \n * This MCP server integrates Exa AI's search capabilities with Claude and other MCP-compatible clients.\n * Exa is a search engine and API specifically designed for up-to-date web searching and retrieval,\n * offering more recent and comprehensive results than what might be available in an LLM's training data.\n * \n * The server provides tools that enable:\n * - Real-time web searching with configurable parameters\n * - Company research and analysis\n * - Web content crawling\n * - People search capabilities\n * - Deep research workflows\n * - And more!\n * \n * This is the Smithery CLI entry point. 
For Vercel deployment, see api/mcp.ts\n */\n\nexport default function ({ config }: { config: z.infer<typeof configSchema> }) {\n  try {\n    // Parse and normalize tool selection\n    // Support both 'tools' and 'enabledTools' parameters\n    // Support both comma-separated strings and arrays\n    let parsedEnabledTools: string[] | undefined;\n    \n    const toolsParam = config.tools || config.enabledTools;\n    \n    if (toolsParam) {\n      if (typeof toolsParam === 'string') {\n        // Parse comma-separated string into array\n        parsedEnabledTools = toolsParam\n          .split(',')\n          .map(tool => tool.trim())\n          .filter(tool => tool.length > 0);\n      } else if (Array.isArray(toolsParam)) {\n        parsedEnabledTools = toolsParam;\n      }\n    }\n    \n    // Create normalized config with parsed tools\n    const normalizedConfig = {\n      exaApiKey: config.exaApiKey,\n      enabledTools: parsedEnabledTools,\n      debug: config.debug\n    };\n    \n    if (config.debug) {\n      log(\"Starting Exa MCP Server (Smithery) in debug mode\");\n      if (parsedEnabledTools) {\n        log(`Enabled tools from config: ${parsedEnabledTools.join(', ')}`);\n      }\n    }\n\n    // Create MCP server\n    const server = new McpServer({\n      name: \"exa-search-server\",\n      title: \"Exa\",\n      version: \"3.1.9\"\n    });\n    \n    log(\"Server initialized with modern MCP SDK and Smithery CLI support\");\n\n    // Initialize server with shared logic\n    initializeMcpServer(server, normalizedConfig);\n    \n    // Return the server object (Smithery CLI handles transport)\n    return server.server;\n    \n  } catch (error) {\n    log(`Server initialization error: ${error instanceof Error ? error.message : String(error)}`);\n    throw error;\n  }\n}\n"
  },
  {
    "path": "src/mcp-handler.ts",
    "content": "import { z } from \"zod\";\nimport { trackMCP, createConfig } from 'agnost';\n\n// Import tool implementations\nimport { registerWebSearchTool } from \"./tools/webSearch.js\";\nimport { registerCompanyResearchTool } from \"./tools/companyResearch.js\";\nimport { registerCrawlingTool } from \"./tools/crawling.js\";\nimport { registerPeopleSearchTool } from \"./tools/peopleSearch.js\";\nimport { registerLinkedInSearchTool } from \"./tools/linkedInSearch.js\";\nimport { registerDeepResearchStartTool } from \"./tools/deepResearchStart.js\";\nimport { registerDeepResearchCheckTool } from \"./tools/deepResearchCheck.js\";\nimport { registerExaCodeTool } from \"./tools/exaCode.js\";\nimport { registerWebSearchAdvancedTool } from \"./tools/webSearchAdvanced.js\";\nimport { registerDeepSearchTool } from \"./tools/deepSearch.js\";\nimport { log } from \"./utils/logger.js\";\n\n// Tool registry for managing available tools\nconst availableTools = {\n  'web_search_exa': { name: 'Web Search (Exa)', description: 'Real-time web search using Exa AI', enabled: true },\n  'web_search_advanced_exa': { name: 'Advanced Web Search (Exa)', description: 'Advanced web search with full Exa API control including category filters, domain restrictions, date ranges, highlights, summaries, and subpage crawling', enabled: false },\n  'get_code_context_exa': { name: 'Code Context Search', description: 'Search for code snippets, examples, and documentation from open source repositories', enabled: true },\n  'company_research_exa': { name: 'Company Research (Deprecated)', description: 'Deprecated: Use web_search_advanced_exa instead. 
Research companies and organizations', enabled: false },\n  'crawling_exa': { name: 'Web Crawling', description: 'Extract content from specific URLs', enabled: false },\n  'deep_researcher_start': { name: 'Deep Researcher Start (Deprecated)', description: 'Deprecated: Start a comprehensive AI research task', enabled: false },\n  'deep_researcher_check': { name: 'Deep Researcher Check (Deprecated)', description: 'Deprecated: Check status and retrieve results of research task', enabled: false },\n  'people_search_exa': { name: 'People Search (Deprecated)', description: 'Deprecated: Use web_search_advanced_exa instead. Search for people and professional profiles', enabled: false },\n  'linkedin_search_exa': { name: 'LinkedIn Search (Deprecated)', description: 'Deprecated: Use web_search_advanced_exa instead', enabled: false },\n  'deep_search_exa': { name: 'Deep Search (Deprecated)', description: 'Deprecated: Use web_search_advanced_exa instead. Deep search with query expansion and synthesized answers (requires API key)', enabled: false },\n};\n\nexport interface McpConfig {\n  exaApiKey?: string;\n  enabledTools?: string[];\n  debug?: boolean;\n  userProvidedApiKey?: boolean;\n}\n\n/**\n * Initialize and configure the MCP server with all tools, prompts, and resources\n * This function is called by both Vercel Functions and Smithery transports\n * \n * @param server - The MCP server instance (can be from McpServer or mcp-handler)\n * @param config - Configuration object with API key and tool settings\n */\nexport function initializeMcpServer(server: any, config: McpConfig = {}) {\n  try {\n    if (config.debug) {\n      log(\"Initializing Exa MCP Server in debug mode\");\n      if (config.enabledTools) {\n        log(`Enabled tools from config: ${config.enabledTools.join(', ')}`);\n      }\n    }\n\n    // Helper function to check if a tool should be registered\n    const shouldRegisterTool = (toolId: string): boolean => {\n      if (config.enabledTools && 
config.enabledTools.length > 0) {\n        return config.enabledTools.includes(toolId);\n      }\n      return availableTools[toolId as keyof typeof availableTools]?.enabled ?? false;\n    };\n\n    // Register tools based on configuration\n    const registeredTools: string[] = [];\n    \n    if (shouldRegisterTool('web_search_exa')) {\n      registerWebSearchTool(server, config);\n      registeredTools.push('web_search_exa');\n    }\n    \n    if (shouldRegisterTool('web_search_advanced_exa')) {\n      registerWebSearchAdvancedTool(server, config);\n      registeredTools.push('web_search_advanced_exa');\n    }\n    \n    if (shouldRegisterTool('company_research_exa')) {\n      registerCompanyResearchTool(server, config);\n      registeredTools.push('company_research_exa');\n    }\n    \n    if (shouldRegisterTool('crawling_exa')) {\n      registerCrawlingTool(server, config);\n      registeredTools.push('crawling_exa');\n    }\n    \n    if (shouldRegisterTool('people_search_exa')) {\n      registerPeopleSearchTool(server, config);\n      registeredTools.push('people_search_exa');\n    }\n    \n    // Deprecated: linkedin_search_exa - kept for backwards compatibility\n    if (shouldRegisterTool('linkedin_search_exa')) {\n      registerLinkedInSearchTool(server, config);\n      registeredTools.push('linkedin_search_exa');\n    }\n    \n    if (shouldRegisterTool('deep_researcher_start')) {\n      registerDeepResearchStartTool(server, config);\n      registeredTools.push('deep_researcher_start');\n    }\n    \n    if (shouldRegisterTool('deep_researcher_check')) {\n      registerDeepResearchCheckTool(server, config);\n      registeredTools.push('deep_researcher_check');\n    }\n    \n    if (shouldRegisterTool('get_code_context_exa')) {\n      registerExaCodeTool(server, config);\n      registeredTools.push('get_code_context_exa');\n    }\n    \n    // deep_search_exa requires the user to have provided their own API key\n    if (shouldRegisterTool('deep_search_exa') 
&& config.userProvidedApiKey) {\n      registerDeepSearchTool(server, config);\n      registeredTools.push('deep_search_exa');\n    }\n    \n    if (config.debug) {\n      log(`Registered ${registeredTools.length} tools: ${registeredTools.join(', ')}`);\n    }\n    \n    // Register prompts to help users get started\n    server.prompt(\n      \"web_search_help\",\n      \"Get help with web search using Exa\",\n      {},\n      async () => {\n        return {\n          messages: [\n            {\n              role: \"user\",\n              content: {\n                type: \"text\",\n                text: \"I want to search the web for current information. Can you help me search for recent news about artificial intelligence breakthroughs?\"\n              }\n            }\n          ]\n        };\n      }\n    );\n\n    server.prompt(\n      \"code_search_help\",\n      \"Get help finding code examples and documentation\",\n      {},\n      async () => {\n        return {\n          messages: [\n            {\n              role: \"user\",\n              content: {\n                type: \"text\",\n                text: \"I need help with a programming task. 
Can you search for examples of how to use React hooks for state management?\"\n              }\n            }\n          ]\n        };\n      }\n    );\n    \n    // Register resources to expose server information\n    server.resource(\n      \"tools_list\",\n      \"exa://tools/list\",\n      {\n        mimeType: \"application/json\",\n        description: \"List of available Exa tools and their descriptions\"\n      },\n      async () => {\n        const toolsList = Object.entries(availableTools).map(([id, tool]) => ({\n          id,\n          name: tool.name,\n          description: tool.description,\n          enabled: registeredTools.includes(id)\n        }));\n        \n        return {\n          contents: [{\n            uri: \"exa://tools/list\",\n            text: JSON.stringify(toolsList, null, 2),\n            mimeType: \"application/json\"\n          }]\n        };\n      }\n    );\n    \n    // Add Agnost analytics tracking (works with both McpServer and mcp-handler)\n    // The server object might be wrapped, so we try to access the underlying server\n    const underlyingServer = (server as any).server || server;\n    \n    try {\n      trackMCP(underlyingServer, \"f0df908b-3703-40a0-a905-05c907da1ca3\", createConfig({\n        endpoint: \"https://api.agnost.ai\",\n        disableLogs: true\n      }));\n      \n      if (config.debug) {\n        log(\"Agnost analytics tracking enabled\");\n      }\n    } catch (analyticsError) {\n      // Log but don't fail if analytics setup fails\n      if (config.debug) {\n        log(`Analytics tracking setup failed (non-critical): ${analyticsError}`);\n      }\n    }\n    \n    if (config.debug) {\n      log(\"MCP server initialization complete\");\n    }\n    \n  } catch (error) {\n    log(`Server initialization error: ${error instanceof Error ? error.message : String(error)}`);\n    throw error;\n  }\n}\n\n"
  },
  {
    "path": "src/tools/companyResearch.ts",
    "content": "import { z } from \"zod\";\nimport axios from \"axios\";\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { API_CONFIG } from \"./config.js\";\nimport { ExaSearchRequest, ExaSearchResponse } from \"../types.js\";\nimport { createRequestLogger } from \"../utils/logger.js\";\nimport { handleRateLimitError } from \"../utils/errorHandler.js\";\nimport { sanitizeSearchResponse } from \"../utils/exaResponseSanitizer.js\";\nimport { checkpoint } from \"agnost\";\n\nexport function registerCompanyResearchTool(server: McpServer, config?: { exaApiKey?: string; userProvidedApiKey?: boolean }): void {\n  server.tool(\n    \"company_research_exa\",\n    `[Deprecated: Use web_search_advanced_exa instead] Research any company to get business information, news, and insights.\n\nBest for: Learning about a company's products, services, recent news, or industry position.\nReturns: Company information from trusted business sources.`,\n    {\n      companyName: z.string().describe(\"Name of the company to research\"),\n      numResults: z.coerce.number().optional().describe(\"Number of search results to return (must be a number, default: 3)\")\n    },\n    {\n      readOnlyHint: true,\n      destructiveHint: false,\n      idempotentHint: true\n    },\n    async ({ companyName, numResults }) => {\n      const requestId = `company_research_exa-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`;\n      const logger = createRequestLogger(requestId, 'company_research_exa');\n      \n      logger.start(companyName);\n      \n      try {\n        // Create a fresh axios instance for each request\n        const axiosInstance = axios.create({\n          baseURL: API_CONFIG.BASE_URL,\n          headers: {\n            'accept': 'application/json',\n            'content-type': 'application/json',\n            'x-api-key': config?.exaApiKey || process.env.EXA_API_KEY || '',\n            'x-exa-integration': 'company-research-mcp'\n          
},\n          timeout: 25000\n        });\n\n        const searchRequest: ExaSearchRequest = {\n          query: `${companyName} company`,\n          type: \"auto\",\n          numResults: numResults || 3,\n          category: \"company\",\n          contents: {\n            highlights: true\n          }\n        };\n        \n        checkpoint('company_research_request_prepared');\n        logger.log(\"Sending request to Exa API for company research\");\n        \n        const response = await axiosInstance.post<ExaSearchResponse>(\n          API_CONFIG.ENDPOINTS.SEARCH,\n          searchRequest,\n          { timeout: 25000 }\n        );\n        \n        checkpoint('company_research_response_received');\n        logger.log(\"Received response from Exa API\");\n\n        if (!response.data || !response.data.results || response.data.results.length === 0) {\n          logger.log(\"Warning: Empty or invalid response from Exa API\");\n          checkpoint('company_research_complete');\n          return {\n            content: [{\n              type: \"text\" as const,\n              text: \"No company information found. Please try a different company name.\"\n            }]\n          };\n        }\n\n        logger.log(`Found ${response.data.results.length} company research results`);\n\n        const sanitized = sanitizeSearchResponse(response.data);\n        const results = Array.isArray(sanitized.results) ? sanitized.results : [];\n\n        const formattedResults = results.map((r) => {\n          const highlights = Array.isArray(r.highlights) ? 
r.highlights.join('\\n') : '';\n          const lines = [\n            `Title: ${r.title || 'N/A'}`,\n            `URL: ${r.url}`,\n            `Published: ${r.publishedDate || 'N/A'}`,\n            `Author: ${r.author || 'N/A'}`,\n            `Highlights:\\n${highlights}`,\n          ];\n          return lines.join('\\n');\n        }).join('\\n\\n---\\n\\n');\n\n        const searchTime = typeof sanitized.searchTime === 'number' ? sanitized.searchTime : undefined;\n        const header = searchTime != null ? `Search Time: ${searchTime}ms\\n\\n` : '';\n        \n        const result = {\n          content: [{\n            type: \"text\" as const,\n            text: header + formattedResults\n          }]\n        };\n        \n        checkpoint('company_research_complete');\n        logger.complete();\n        return result;\n      } catch (error) {\n        logger.error(error);\n        \n        // Check for rate limit error on free MCP\n        const rateLimitResult = handleRateLimitError(error, config?.userProvidedApiKey, 'company_research_exa');\n        if (rateLimitResult) {\n          return rateLimitResult;\n        }\n        \n        if (axios.isAxiosError(error)) {\n          // Handle Axios errors specifically\n          const statusCode = error.response?.status || 'unknown';\n          const errorMessage = error.response?.data?.message || error.message;\n          \n          logger.log(`Axios error (${statusCode}): ${errorMessage}`);\n          return {\n            content: [{\n              type: \"text\" as const,\n              text: `Company research error (${statusCode}): ${errorMessage}`\n            }],\n            isError: true,\n          };\n        }\n        \n        // Handle generic errors\n        return {\n          content: [{\n            type: \"text\" as const,\n            text: `Company research error: ${error instanceof Error ? 
error.message : String(error)}`\n          }],\n          isError: true,\n        };\n      }\n    }\n  );\n}                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                "
  },
  {
    "path": "src/tools/config.ts",
    "content": "// Configuration for API\nexport const API_CONFIG = {\n  BASE_URL: 'https://api.exa.ai',\n  ENDPOINTS: {\n    SEARCH: '/search',\n    RESEARCH: '/research/v1',\n    CONTEXT: '/context'\n  },\n  DEFAULT_NUM_RESULTS: 8,\n  DEFAULT_MAX_CHARACTERS: 2000\n} as const;  "
  },
  {
    "path": "src/tools/crawling.ts",
    "content": "import { z } from \"zod\";\nimport axios from \"axios\";\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { API_CONFIG } from \"./config.js\";\nimport { createRequestLogger } from \"../utils/logger.js\";\nimport { handleRateLimitError } from \"../utils/errorHandler.js\";\nimport { sanitizeContentsResponse } from \"../utils/exaResponseSanitizer.js\";\nimport { checkpoint } from \"agnost\";\n\nexport function registerCrawlingTool(server: McpServer, config?: { exaApiKey?: string; userProvidedApiKey?: boolean }): void {\n  server.tool(\n    \"crawling_exa\",\n    `Get the full content of a specific webpage. Use when you have an exact URL.\n\nBest for: Extracting content from a known URL.\nReturns: Full text content and metadata from the page.`,\n    {\n      url: z.string().describe(\"URL to crawl and extract content from\"),\n      maxCharacters: z.coerce.number().optional().describe(\"Maximum characters to extract (must be a number, default: 3000)\")\n    },\n    {\n      readOnlyHint: true,\n      destructiveHint: false,\n      idempotentHint: true\n    },\n    async ({ url, maxCharacters }) => {\n      const requestId = `crawling_exa-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`;\n      const logger = createRequestLogger(requestId, 'crawling_exa');\n      \n      logger.start(url);\n      \n      try {\n        // Create a fresh axios instance for each request\n        const axiosInstance = axios.create({\n          baseURL: API_CONFIG.BASE_URL,\n          headers: {\n            'accept': 'application/json',\n            'content-type': 'application/json',\n            'x-api-key': config?.exaApiKey || process.env.EXA_API_KEY || '',\n            'x-exa-integration': 'crawling-mcp'\n          },\n          timeout: 25000\n        });\n\n        const crawlRequest = {\n          ids: [url],\n          contents: {\n            text: {\n              maxCharacters: maxCharacters || 
API_CONFIG.DEFAULT_MAX_CHARACTERS\n            },\n            livecrawl: 'preferred'\n          }\n        };\n        \n        checkpoint('crawl_request_prepared');\n        logger.log(\"Sending crawl request to Exa API\");\n        \n        const response = await axiosInstance.post(\n          '/contents',\n          crawlRequest,\n          { timeout: 25000 }\n        );\n        \n        checkpoint('crawl_response_received');\n        logger.log(\"Received response from Exa API\");\n\n        if (!response.data || !response.data.results || response.data.results.length === 0) {\n          logger.log(\"Warning: Empty or invalid response from Exa API\");\n          checkpoint('crawl_complete');\n          return {\n            content: [{\n              type: \"text\" as const,\n              text: \"No content found for the provided URL.\"\n            }]\n          };\n        }\n\n        logger.log(`Successfully crawled content from URL`);\n\n        const result = {\n          content: [{\n            type: \"text\" as const,\n            text: JSON.stringify(sanitizeContentsResponse(response.data), null, 2)\n          }]\n        };\n        \n        checkpoint('crawl_complete');\n        logger.complete();\n        return result;\n      } catch (error) {\n        logger.error(error);\n        \n        // Check for rate limit error on free MCP\n        const rateLimitResult = handleRateLimitError(error, config?.userProvidedApiKey, 'crawling_exa');\n        if (rateLimitResult) {\n          return rateLimitResult;\n        }\n        \n        if (axios.isAxiosError(error)) {\n          // Handle Axios errors specifically\n          const statusCode = error.response?.status || 'unknown';\n          const errorMessage = error.response?.data?.message || error.message;\n          \n          logger.log(`Axios error (${statusCode}): ${errorMessage}`);\n          return {\n            content: [{\n              type: \"text\" as const,\n              text: 
`Crawling error (${statusCode}): ${errorMessage}`\n            }],\n            isError: true,\n          };\n        }\n        \n        // Handle generic errors\n        return {\n          content: [{\n            type: \"text\" as const,\n            text: `Crawling error: ${error instanceof Error ? error.message : String(error)}`\n          }],\n          isError: true,\n        };\n      }\n    }\n  );\n}                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                "
  },
  {
    "path": "src/tools/deepResearchCheck.ts",
    "content": "import { z } from \"zod\";\nimport axios from \"axios\";\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { API_CONFIG } from \"./config.js\";\nimport { DeepResearchCheckResponse, DeepResearchErrorResponse } from \"../types.js\";\nimport { createRequestLogger } from \"../utils/logger.js\";\nimport { handleRateLimitError } from \"../utils/errorHandler.js\";\nimport { checkpoint } from \"agnost\";\n\n// Helper function to create a delay\nfunction delay(ms: number): Promise<void> {\n  return new Promise(resolve => setTimeout(resolve, ms));\n}\n\nexport function registerDeepResearchCheckTool(server: McpServer, config?: { exaApiKey?: string; userProvidedApiKey?: boolean }): void {\n  server.tool(\n    \"deep_researcher_check\",\n    `[Deprecated] Check status and get results from a deep research task.\n\nBest for: Getting the research report after calling deep_researcher_start.\nReturns: Research report when complete, or status update if still running.\nImportant: Keep calling with the same research ID until status is 'completed'.`,\n    {\n      researchId: z.string().describe(\"The research ID returned from deep_researcher_start tool\")\n    },\n    {\n      readOnlyHint: true,\n      destructiveHint: false,\n      idempotentHint: true\n    },\n    async ({ researchId }) => {\n      const requestId = `deep_researcher_check-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`;\n      const logger = createRequestLogger(requestId, 'deep_researcher_check');\n      \n      logger.start(researchId);\n      \n      try {\n        // Built-in delay to allow processing time\n        logger.log(\"Waiting 5 seconds before checking status...\");\n        await delay(5000);\n        checkpoint('deep_research_check_delay_complete');\n\n        // Create a fresh axios instance for each request\n        const axiosInstance = axios.create({\n          baseURL: API_CONFIG.BASE_URL,\n          headers: {\n            'accept': 
'application/json',\n            'x-api-key': config?.exaApiKey || process.env.EXA_API_KEY || '',\n            'x-exa-integration': 'deep-research-mcp'\n          },\n          timeout: 25000\n        });\n\n        logger.log(`Checking status for research: ${researchId}`);\n        \n        checkpoint('deep_research_check_request_prepared');\n        const response = await axiosInstance.get<DeepResearchCheckResponse>(\n          `${API_CONFIG.ENDPOINTS.RESEARCH}/${researchId}`,\n          { timeout: 25000 }\n        );\n        \n        checkpoint('deep_research_check_response_received');\n        logger.log(`Task status: ${response.data.status}`);\n\n        if (!response.data) {\n          logger.log(\"Warning: Empty response from Exa Research API\");\n          checkpoint('deep_research_check_complete');\n          return {\n            content: [{\n              type: \"text\" as const,\n              text: \"Failed to check research task status. Please try again.\"\n            }],\n            isError: true,\n          };\n        }\n\n        // Format the response based on status\n        let resultText: string;\n        \n        if (response.data.status === 'completed') {\n          resultText = JSON.stringify({\n            success: true,\n            status: response.data.status,\n            researchId: response.data.researchId,\n            report: response.data.output?.content || \"No report generated\",\n            parsedOutput: response.data.output?.parsed,\n            citations: response.data.citations,\n            model: response.data.model,\n            costDollars: response.data.costDollars,\n            message: \"Deep research completed! 
Here's your comprehensive research report.\"\n          }, null, 2);\n          logger.log(\"Research completed successfully\");\n        } else if (response.data.status === 'running' || response.data.status === 'pending') {\n          resultText = JSON.stringify({\n            success: true,\n            status: response.data.status,\n            researchId: response.data.researchId,\n            message: \"Research in progress. Continue polling...\",\n            nextAction: \"Call deep_researcher_check again with the same research ID\"\n          }, null, 2);\n          logger.log(\"Research still in progress\");\n        } else if (response.data.status === 'failed') {\n          resultText = JSON.stringify({\n            success: false,\n            status: response.data.status,\n            researchId: response.data.researchId,\n            createdAt: new Date(response.data.createdAt).toISOString(),\n            instructions: response.data.instructions,\n            message: \"Deep research task failed. Please try starting a new research task with different instructions.\"\n          }, null, 2);\n          logger.log(\"Research task failed\");\n        } else if (response.data.status === 'canceled') {\n          resultText = JSON.stringify({\n            success: false,\n            status: response.data.status,\n            researchId: response.data.researchId,\n            message: \"Research task was canceled.\"\n          }, null, 2);\n          logger.log(\"Research task canceled\");\n        } else {\n          resultText = JSON.stringify({\n            success: false,\n            status: response.data.status,\n            researchId: response.data.researchId,\n            message: `Unknown status: ${response.data.status}. 
Continue polling or restart the research task.`\n          }, null, 2);\n          logger.log(`Unknown status: ${response.data.status}`);\n        }\n\n        const result = {\n          content: [{\n            type: \"text\" as const,\n            text: resultText\n          }]\n        };\n        \n        checkpoint('deep_research_check_complete');\n        logger.complete();\n        return result;\n      } catch (error) {\n        logger.error(error);\n        \n        if (axios.isAxiosError(error)) {\n          // Handle specific 404 error for task not found\n          if (error.response?.status === 404) {\n            const errorData = error.response.data as DeepResearchErrorResponse;\n            logger.log(`Research not found: ${researchId}`);\n            return {\n              content: [{\n                type: \"text\" as const,\n                text: JSON.stringify({\n                  success: false,\n                  error: \"Research not found\",\n                  researchId: researchId,\n                  message: \"The specified research ID was not found. 
Please check the ID or start a new research task using deep_researcher_start.\"\n                }, null, 2)\n              }],\n              isError: true,\n            };\n          }\n          \n          // Check for rate limit error on free MCP\n          const rateLimitResult = handleRateLimitError(error, config?.userProvidedApiKey, 'deep_researcher_check');\n          if (rateLimitResult) {\n            return rateLimitResult;\n          }\n          \n          // Handle other Axios errors\n          const statusCode = error.response?.status || 'unknown';\n          const errorMessage = error.response?.data?.message || error.message;\n          \n          logger.log(`Axios error (${statusCode}): ${errorMessage}`);\n          return {\n            content: [{\n              type: \"text\" as const,\n              text: `Research check error (${statusCode}): ${errorMessage}`\n            }],\n            isError: true,\n          };\n        }\n        \n        // Handle generic errors\n        return {\n          content: [{\n            type: \"text\" as const,\n            text: `Research check error: ${error instanceof Error ? error.message : String(error)}`\n          }],\n          isError: true,\n        };\n      }\n    }\n  );\n}                                                                                                                                                                                                                                                                                                "
  },
  {
    "path": "src/tools/deepResearchStart.ts",
    "content": "import { z } from \"zod\";\nimport axios from \"axios\";\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { API_CONFIG } from \"./config.js\";\nimport { DeepResearchRequest, DeepResearchStartResponse } from \"../types.js\";\nimport { createRequestLogger } from \"../utils/logger.js\";\nimport { handleRateLimitError } from \"../utils/errorHandler.js\";\nimport { checkpoint } from \"agnost\";\n\nexport function registerDeepResearchStartTool(server: McpServer, config?: { exaApiKey?: string; userProvidedApiKey?: boolean }): void {\n  server.tool(\n    \"deep_researcher_start\",\n    `[Deprecated] Start an AI research agent that searches, reads, and writes a detailed report. Takes 15 seconds to 2 minutes.\n\nBest for: Complex research questions needing deep analysis and synthesis.\nReturns: Research ID - use deep_researcher_check to get results.\nImportant: Call deep_researcher_check with the returned research ID to get the report.`,\n    {\n      instructions: z.string().describe(\"Complex research question or detailed instructions for the AI researcher. Be specific about what you want to research and any particular aspects you want covered.\"),\n      model: z.enum(['exa-research-fast', 'exa-research', 'exa-research-pro']).optional().describe(\"Research model: 'exa-research-fast' (fastest, ~15s, good for simple queries), 'exa-research' (balanced, 15-45s, good for most queries), or 'exa-research-pro' (most comprehensive, 45s-3min, for complex topics). Default: exa-research-fast\"),\n      outputSchema: z.record(z.unknown()).optional().describe(\"Optional JSON Schema for structured output. 
When provided, the research report will include a 'parsed' field with data matching this schema.\")\n    },\n    {\n      readOnlyHint: false,\n      destructiveHint: false,\n      idempotentHint: false\n    },\n    async ({ instructions, model, outputSchema }) => {\n      const requestId = `deep_researcher_start-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`;\n      const logger = createRequestLogger(requestId, 'deep_researcher_start');\n      \n      logger.start(instructions);\n      \n      try {\n        // Create a fresh axios instance for each request\n        const axiosInstance = axios.create({\n          baseURL: API_CONFIG.BASE_URL,\n          headers: {\n            'accept': 'application/json',\n            'content-type': 'application/json',\n            'x-api-key': config?.exaApiKey || process.env.EXA_API_KEY || '',\n            'x-exa-integration': 'deep-research-mcp'\n          },\n          timeout: 25000\n        });\n\n        const researchRequest: DeepResearchRequest = {\n          model: model || 'exa-research-fast',\n          instructions,\n          ...(outputSchema && { outputSchema })\n        };\n        \n        checkpoint('deep_research_start_request_prepared', {\n          model: researchRequest.model\n        });\n        logger.log(`Starting research with model: ${researchRequest.model}`);\n        \n        const response = await axiosInstance.post<DeepResearchStartResponse>(\n          API_CONFIG.ENDPOINTS.RESEARCH,\n          researchRequest,\n          { timeout: 25000 }\n        );\n        \n        checkpoint('deep_research_start_response_received');\n        logger.log(`Research task started with ID: ${response.data.researchId}`);\n\n        if (!response.data || !response.data.researchId) {\n          logger.log(\"Warning: Empty or invalid response from Exa Research API\");\n          checkpoint('deep_research_start_complete');\n          return {\n            content: [{\n              type: \"text\" as 
const,\n              text: \"Failed to start research task. Please try again.\"\n            }],\n            isError: true,\n          };\n        }\n\n        const result = {\n          content: [{\n            type: \"text\" as const,\n            text: JSON.stringify({\n              success: true,\n              researchId: response.data.researchId,\n              model: researchRequest.model,\n              instructions: instructions,\n              message: `Deep research task started successfully with ${researchRequest.model} model. IMMEDIATELY use deep_researcher_check with research ID '${response.data.researchId}' to monitor progress. Keep checking every few seconds until status is 'completed' to get the research results.`,\n              nextStep: `Call deep_researcher_check with researchId: \"${response.data.researchId}\"`\n            }, null, 2)\n          }]\n        };\n        \n        checkpoint('deep_research_start_complete');\n        logger.complete();\n        return result;\n      } catch (error) {\n        logger.error(error);\n        \n        // Check for rate limit error on free MCP\n        const rateLimitResult = handleRateLimitError(error, config?.userProvidedApiKey, 'deep_researcher_start');\n        if (rateLimitResult) {\n          return rateLimitResult;\n        }\n        \n        if (axios.isAxiosError(error)) {\n          // Handle Axios errors specifically\n          const statusCode = error.response?.status || 'unknown';\n          const errorMessage = error.response?.data?.message || error.message;\n          \n          logger.log(`Axios error (${statusCode}): ${errorMessage}`);\n          return {\n            content: [{\n              type: \"text\" as const,\n              text: `Research start error (${statusCode}): ${errorMessage}`\n            }],\n            isError: true,\n          };\n        }\n        \n        // Handle generic errors\n        return {\n          content: [{\n            type: \"text\" 
as const,\n            text: `Research start error: ${error instanceof Error ? error.message : String(error)}`\n          }],\n          isError: true,\n        };\n      }\n    }\n  );\n}                                                                                                                                                                                                                                                                                                "
  },
  {
    "path": "src/tools/deepSearch.ts",
    "content": "import { z } from \"zod\";\nimport axios from \"axios\";\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { API_CONFIG } from \"./config.js\";\nimport { ExaDeepSearchRequest, ExaDeepSearchResponse } from \"../types.js\";\nimport { createRequestLogger } from \"../utils/logger.js\";\nimport { handleRateLimitError } from \"../utils/errorHandler.js\";\nimport { sanitizeDeepSearchStructuredResponse } from \"../utils/exaResponseSanitizer.js\";\nimport { checkpoint } from \"agnost\";\n\nexport function registerDeepSearchTool(server: McpServer, config?: { exaApiKey?: string; userProvidedApiKey?: boolean }): void {\n  server.tool(\n    \"deep_search_exa\",\n    `[Deprecated: Use web_search_advanced_exa instead] Deep search with automatic query expansion for thorough research. Generates multiple search variations to find results from multiple angles, then synthesizes a short answer with citations.\n\nBest for: Complex questions needing information from multiple angles.\nReturns: A synthesized answer with citations, plus individual search results with highlights. When structuredOutput is enabled, returns structured JSON instead of markdown.\nNote: Requires an Exa API key. 'deep' mode takes 4-12s, 'deep-reasoning' takes 12-50s.`,\n    {\n      objective: z.string().describe(\"Natural language description of what the web search is looking for. Try to make the search query atomic - looking for a specific piece of information.\"),\n      search_queries: z.array(z.string()).optional().describe(\"Optional list of keyword search queries related to the objective. 
Limited to 5 entries of up to 5 words each (~200 characters).\"),\n      type: z.enum(['deep', 'deep-reasoning']).optional().describe(\"Search depth - 'deep': fast deep search (4-12s, default), 'deep-reasoning': thorough with reasoning (12-50s)\"),\n      numResults: z.coerce.number().optional().describe(\"Number of search results to return (must be a number, default: 8)\"),\n      highlightMaxCharacters: z.coerce.number().optional().describe(\"Maximum characters for highlights per result (must be a number, default: 4000)\"),\n      outputSchema: z.record(z.string(), z.unknown()).optional().describe(\"JSON schema for structured output. Must include a 'type' field set to 'object' or 'text'. For 'object' type, optionally include 'properties' and 'required'. Max 10 total properties, max nesting depth 2. When provided, automatically enables structured output mode.\"),\n      systemPrompt: z.string().max(32000).optional().describe(\"Instructions for how the deep search agent should process and format results.\"),\n      structuredOutput: z.boolean().optional().describe(\"When true, returns a structured JSON response instead of markdown. The API will determine the appropriate structure based on the query. 
Prefer using outputSchema for more control over the response shape.\"),\n    },\n    {\n      readOnlyHint: true,\n      destructiveHint: false,\n      idempotentHint: false\n    },\n    async ({ objective, search_queries, type, numResults, highlightMaxCharacters, outputSchema, systemPrompt, structuredOutput }) => {\n      const requestId = `deep_search_exa-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`;\n      const logger = createRequestLogger(requestId, 'deep_search_exa');\n\n      logger.start(objective);\n\n      try {\n        const axiosInstance = axios.create({\n          baseURL: API_CONFIG.BASE_URL,\n          headers: {\n            'accept': 'application/json',\n            'content-type': 'application/json',\n            'x-api-key': config?.exaApiKey || process.env.EXA_API_KEY || '',\n            'x-exa-integration': 'deep-search-mcp'\n          },\n          timeout: 55000\n        });\n\n        const searchRequest: ExaDeepSearchRequest = {\n          query: objective,\n          type: type || \"deep\",\n          numResults: numResults || API_CONFIG.DEFAULT_NUM_RESULTS,\n          contents: {\n            highlights: {\n              maxCharacters: highlightMaxCharacters || 4000\n            }\n          }\n        };\n\n        if (outputSchema) {\n          searchRequest.outputSchema = outputSchema;\n          logger.log(\"Using custom output schema\");\n        } else if (structuredOutput) {\n          searchRequest.outputSchema = { type: \"object\" };\n          logger.log(\"Using default structured output\");\n        }\n\n        if (systemPrompt) {\n          searchRequest.systemPrompt = systemPrompt;\n          logger.log(\"Using system prompt\");\n        }\n\n        if (search_queries && search_queries.length > 0) {\n          searchRequest.additionalQueries = search_queries;\n          logger.log(`Using ${search_queries.length} additional queries`);\n        } else {\n          logger.log(\"Using automatic query 
expansion\");\n        }\n\n        checkpoint('deep_search_request_prepared');\n        logger.log(\"Sending deep search request to Exa API\");\n\n        const response = await axiosInstance.post<ExaDeepSearchResponse>(\n          API_CONFIG.ENDPOINTS.SEARCH,\n          searchRequest,\n          { timeout: 55000 }\n        );\n\n        checkpoint('deep_search_response_received');\n        logger.log(\"Received response from Exa API\");\n\n        if (!response.data) {\n          logger.log(\"Warning: Empty response from Exa API\");\n          checkpoint('deep_search_complete');\n          return {\n            content: [{\n              type: \"text\" as const,\n              text: \"No search results found. Please try a different query.\"\n            }]\n          };\n        }\n\n        const data = response.data;\n\n        // When structured output was requested (via outputSchema or structuredOutput flag), return the raw JSON response\n        if (outputSchema || structuredOutput) {\n          const structuredResponse = sanitizeDeepSearchStructuredResponse(data);\n\n          const text = JSON.stringify(structuredResponse, null, 2);\n          logger.log(`Structured response prepared with ${text.length} characters`);\n\n          const result = {\n            content: [{\n              type: \"text\" as const,\n              text\n            }]\n          };\n\n          checkpoint('deep_search_complete');\n          logger.complete();\n          return result;\n        }\n\n        const parts: string[] = [];\n\n        // Synthesized answer\n        if (data.output?.content && typeof data.output.content === 'string') {\n          parts.push(`## Answer\\n\\n${data.output.content}`);\n        }\n\n        // Citations from grounding (aggregated into a single section)\n        if (data.output?.grounding) {\n          const allCitations: string[] = [];\n          for (const g of data.output.grounding) {\n            if (g.citations && g.citations.length > 
0) {\n              allCitations.push(...g.citations.map(c => `- [${c.title}](${c.url})`));\n            }\n          }\n          if (allCitations.length > 0) {\n            parts.push(`## Citations\\n\\n${allCitations.join('\\n')}`);\n          }\n        }\n\n        // Individual results as markdown\n        if (data.results && data.results.length > 0) {\n          const resultLines = data.results.map((r, i) => {\n            const lines: string[] = [];\n            lines.push(`### ${i + 1}. ${r.title || 'Untitled'}`);\n            lines.push(`**URL:** ${r.url}`);\n            if (r.publishedDate) {\n              lines.push(`**Published:** ${r.publishedDate}`);\n            }\n            if (r.image) {\n              lines.push(`**Image:** ${r.image}`);\n            }\n            if (r.highlights && r.highlights.length > 0) {\n              lines.push(`\\n${r.highlights.join('\\n\\n')}`);\n            }\n            return lines.join('\\n');\n          });\n          parts.push(`## Results\\n\\n${resultLines.join('\\n\\n---\\n\\n')}`);\n        }\n\n        const text = parts.length > 0\n          ? parts.join('\\n\\n---\\n\\n')\n          : \"No results found. 
Please try a different query.\";\n\n        logger.log(`Response prepared with ${text.length} characters`);\n\n        const result = {\n          content: [{\n            type: \"text\" as const,\n            text\n          }]\n        };\n\n        checkpoint('deep_search_complete');\n        logger.complete();\n        return result;\n      } catch (error) {\n        checkpoint('deep_search_complete');\n        logger.error(error);\n\n        const rateLimitResult = handleRateLimitError(error, config?.userProvidedApiKey, 'deep_search_exa');\n        if (rateLimitResult) {\n          return rateLimitResult;\n        }\n\n        if (axios.isAxiosError(error)) {\n          const statusCode = error.response?.status || 'unknown';\n          const errorMessage = error.response?.data?.message || error.message;\n\n          logger.log(`Axios error (${statusCode}): ${errorMessage}`);\n          return {\n            content: [{\n              type: \"text\" as const,\n              text: `Deep search error (${statusCode}): ${errorMessage}`\n            }],\n            isError: true,\n          };\n        }\n\n        return {\n          content: [{\n            type: \"text\" as const,\n            text: `Deep search error: ${error instanceof Error ? error.message : String(error)}`\n          }],\n          isError: true,\n        };\n      }\n    }\n  );\n}\n"
  },
  {
    "path": "src/tools/exaCode.ts",
    "content": "import { z } from \"zod\";\nimport axios from \"axios\";\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { API_CONFIG } from \"./config.js\";\nimport { ExaCodeRequest, ExaCodeResponse } from \"../types.js\";\nimport { createRequestLogger } from \"../utils/logger.js\";\nimport { handleRateLimitError } from \"../utils/errorHandler.js\";\nimport { checkpoint } from \"agnost\";\n\nexport function registerExaCodeTool(server: McpServer, config?: { exaApiKey?: string; userProvidedApiKey?: boolean }): void {\n  server.tool(\n    \"get_code_context_exa\",\n    `Find code examples, documentation, and programming solutions. Searches GitHub, Stack Overflow, and official docs.\n\nBest for: Any programming question - API usage, library examples, code snippets, debugging help.\nReturns: Relevant code and documentation, formatted for easy reading.`,\n    {\n      query: z.string().describe(\"Search query to find relevant context for APIs, Libraries, and SDKs. For example, 'React useState hook examples', 'Python pandas dataframe filtering', 'Express.js middleware', 'Next js partial prerendering configuration'\"),\n      tokensNum: z.coerce.number().min(1000).max(50000).default(5000).describe(\"Number of tokens to return (must be a number, 1000-50000). Default is 5000 tokens. 
Adjust this value based on how much context you need - use lower values for focused queries and higher values for comprehensive documentation.\")\n    },\n    {\n      readOnlyHint: true,\n      destructiveHint: false,\n      idempotentHint: true\n    },\n    async ({ query, tokensNum }) => {\n      const requestId = `get_code_context_exa-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`;\n      const logger = createRequestLogger(requestId, 'get_code_context_exa');\n      \n      logger.start(`Searching for code context: ${query}`);\n      \n      try {\n        // Create a fresh axios instance for each request\n        const axiosInstance = axios.create({\n          baseURL: API_CONFIG.BASE_URL,\n          headers: {\n            'accept': 'application/json',\n            'content-type': 'application/json',\n            'x-api-key': config?.exaApiKey || process.env.EXA_API_KEY || '',\n            'x-exa-integration': 'exa-code-mcp'\n          },\n          timeout: 30000\n        });\n\n        const exaCodeRequest: ExaCodeRequest = {\n          query,\n          tokensNum\n        };\n        \n        checkpoint('code_context_request_prepared');\n        logger.log(\"Sending code context request to Exa API\");\n        \n        const response = await axiosInstance.post<ExaCodeResponse>(\n          API_CONFIG.ENDPOINTS.CONTEXT,\n          exaCodeRequest,\n          { timeout: 30000 }\n        );\n        \n        checkpoint('code_context_response_received');\n        logger.log(\"Received code context response from Exa API\");\n\n        if (!response.data) {\n          logger.log(\"Warning: Empty response from Exa Code API\");\n          checkpoint('code_context_complete');\n          return {\n            content: [{\n              type: \"text\" as const,\n              text: \"No code snippets or documentation found. 
Please try a different query, be more specific about the library or programming concept, or check the spelling of framework names.\"\n            }]\n          };\n        }\n\n        logger.log(`Code search completed with ${response.data.resultsCount || 0} results`);\n        \n        // Return the actual code content from the response field\n        const codeContent = typeof response.data.response === 'string' \n          ? response.data.response \n          : JSON.stringify(response.data.response, null, 2);\n        \n        const result = {\n          content: [{\n            type: \"text\" as const,\n            text: codeContent\n          }]\n        };\n        \n        checkpoint('code_context_complete');\n        logger.complete();\n        return result;\n      } catch (error) {\n        logger.error(error);\n        \n        // Check for rate limit error on free MCP\n        const rateLimitResult = handleRateLimitError(error, config?.userProvidedApiKey, 'get_code_context_exa');\n        if (rateLimitResult) {\n          return rateLimitResult;\n        }\n        \n        if (axios.isAxiosError(error)) {\n          // Handle Axios errors specifically\n          const statusCode = error.response?.status || 'unknown';\n          const errorMessage = error.response?.data?.message || error.message;\n          \n          logger.log(`Axios error (${statusCode}): ${errorMessage}`);\n          return {\n            content: [{\n              type: \"text\" as const,\n              text: `Code search error (${statusCode}): ${errorMessage}. Please check your query and try again.`\n            }],\n            isError: true,\n          };\n        }\n        \n        // Handle generic errors\n        return {\n          content: [{\n            type: \"text\" as const,\n            text: `Code search error: ${error instanceof Error ? error.message : String(error)}`\n          }],\n          isError: true,\n        };\n      }\n    }\n  );\n}\n"
  },
  {
    "path": "src/tools/linkedInSearch.ts",
    "content": "import { z } from \"zod\";\nimport axios from \"axios\";\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { API_CONFIG } from \"./config.js\";\nimport { ExaSearchRequest, ExaSearchResponse } from \"../types.js\";\nimport { createRequestLogger } from \"../utils/logger.js\";\nimport { handleRateLimitError } from \"../utils/errorHandler.js\";\nimport { sanitizeSearchResponse } from \"../utils/exaResponseSanitizer.js\";\nimport { checkpoint } from \"agnost\";\n\nexport function registerLinkedInSearchTool(server: McpServer, config?: { exaApiKey?: string; userProvidedApiKey?: boolean }): void {\n  server.tool(\n    \"linkedin_search_exa\",\n    \"⚠️ DEPRECATED: This tool is deprecated. Please use 'people_search_exa' instead. This tool will be removed in a future version. For now, it searches for people on LinkedIn using Exa AI - finds professional profiles and people.\",\n    {\n      query: z.string().describe(\"Search query for finding people on LinkedIn\"),\n      numResults: z.coerce.number().optional().describe(\"Number of LinkedIn profile results to return (must be a number, default: 5)\")\n    },\n    {\n      readOnlyHint: true,\n      destructiveHint: false,\n      idempotentHint: true\n    },\n    async ({ query, numResults }) => {\n      const requestId = `linkedin_search_exa-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`;\n      const logger = createRequestLogger(requestId, 'linkedin_search_exa');\n      \n      logger.start(`${query}`);\n      \n      try {\n        // Create a fresh axios instance for each request\n        const axiosInstance = axios.create({\n          baseURL: API_CONFIG.BASE_URL,\n          headers: {\n            'accept': 'application/json',\n            'content-type': 'application/json',\n            'x-api-key': config?.exaApiKey || process.env.EXA_API_KEY || '',\n            'x-exa-integration': 'linkedin-search-mcp'\n          },\n          timeout: 25000\n        
});\n\n        let searchQuery = query;\n        searchQuery = `${query} LinkedIn profile`;\n\n        const searchRequest: ExaSearchRequest = {\n          query: searchQuery,\n          type: \"auto\",\n          numResults: numResults || API_CONFIG.DEFAULT_NUM_RESULTS,\n          category: \"people\",\n          contents: {\n            highlights: true,\n          },\n        };\n        \n        checkpoint('linkedin_search_request_prepared');\n        logger.log(\"Sending request to Exa API for LinkedIn search\");\n        \n        const response = await axiosInstance.post<ExaSearchResponse>(\n          API_CONFIG.ENDPOINTS.SEARCH,\n          searchRequest,\n          { timeout: 25000 }\n        );\n        \n        checkpoint('linkedin_search_response_received');\n        logger.log(\"Received response from Exa API\");\n\n        if (!response.data || !response.data.results || response.data.results.length === 0) {\n          logger.log(\"Warning: Empty or invalid response from Exa API\");\n          checkpoint('linkedin_search_complete');\n          return {\n            content: [{\n              type: \"text\" as const,\n              text: \"No LinkedIn content found. Please try a different query. Note: This tool is deprecated - please use 'people_search_exa' instead.\"\n            }]\n          };\n        }\n\n        logger.log(`Found ${response.data.results.length} LinkedIn results`);\n\n        const sanitized = sanitizeSearchResponse(response.data);\n        const results = Array.isArray(sanitized.results) ? sanitized.results : [];\n\n        const formattedResults = results.map((r) => {\n          const highlights = Array.isArray(r.highlights) ? 
r.highlights.join('\\n') : '';\n          const lines = [\n            `Title: ${r.title || 'N/A'}`,\n            `URL: ${r.url}`,\n            `Published: ${r.publishedDate || 'N/A'}`,\n            `Author: ${r.author || 'N/A'}`,\n            `Highlights:\\n${highlights}`,\n          ];\n          return lines.join('\\n');\n        }).join('\\n\\n---\\n\\n');\n\n        const searchTime = typeof sanitized.searchTime === 'number' ? sanitized.searchTime : undefined;\n        const header = searchTime != null ? `Search Time: ${searchTime}ms\\n\\n` : '';\n        const deprecationNotice = \"\\n\\n⚠️ DEPRECATION NOTICE: This tool (linkedin_search_exa) is deprecated. Please use 'people_search_exa' instead for future requests.\";\n        \n        const result = {\n          content: [{\n            type: \"text\" as const,\n            text: header + formattedResults + deprecationNotice\n          }]\n        };\n        \n        checkpoint('linkedin_search_complete');\n        logger.complete();\n        return result;\n      } catch (error) {\n        logger.error(error);\n        \n        // Check for rate limit error on free MCP\n        const rateLimitResult = handleRateLimitError(error, config?.userProvidedApiKey, 'linkedin_search_exa');\n        if (rateLimitResult) {\n          return rateLimitResult;\n        }\n        \n        if (axios.isAxiosError(error)) {\n          // Handle Axios errors specifically\n          const statusCode = error.response?.status || 'unknown';\n          const errorMessage = error.response?.data?.message || error.message;\n          \n          logger.log(`Axios error (${statusCode}): ${errorMessage}`);\n          return {\n            content: [{\n              type: \"text\" as const,\n              text: `LinkedIn search error (${statusCode}): ${errorMessage}\\n\\n⚠️ Note: This tool is deprecated. 
Please use 'people_search_exa' instead.`\n            }],\n            isError: true,\n          };\n        }\n        \n        // Handle generic errors\n        return {\n          content: [{\n            type: \"text\" as const,\n            text: `LinkedIn search error: ${error instanceof Error ? error.message : String(error)}\\n\\n⚠️ Note: This tool is deprecated. Please use 'people_search_exa' instead.`\n          }],\n          isError: true,\n        };\n      }\n    }\n  );\n}\n"
  },
  {
    "path": "src/tools/peopleSearch.ts",
    "content": "import { z } from \"zod\";\nimport axios from \"axios\";\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { API_CONFIG } from \"./config.js\";\nimport { ExaSearchRequest, ExaSearchResponse } from \"../types.js\";\nimport { createRequestLogger } from \"../utils/logger.js\";\nimport { handleRateLimitError } from \"../utils/errorHandler.js\";\nimport { sanitizeSearchResponse } from \"../utils/exaResponseSanitizer.js\";\nimport { checkpoint } from \"agnost\";\n\nexport function registerPeopleSearchTool(server: McpServer, config?: { exaApiKey?: string; userProvidedApiKey?: boolean }): void {\n  server.tool(\n    \"people_search_exa\",\n    `[Deprecated: Use web_search_advanced_exa instead] Find people and their professional profiles.\n\nBest for: Finding professionals, executives, or anyone with a public profile.\nReturns: Profile information and links.`,\n    {\n      query: z.string().describe(\"Search query for finding people\"),\n      numResults: z.coerce.number().optional().describe(\"Number of profile results to return (must be a number, default: 5)\")\n    },\n    {\n      readOnlyHint: true,\n      destructiveHint: false,\n      idempotentHint: true\n    },\n    async ({ query, numResults }) => {\n      const requestId = `people_search_exa-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`;\n      const logger = createRequestLogger(requestId, 'people_search_exa');\n      \n      logger.start(`${query}`);\n      \n      try {\n        // Create a fresh axios instance for each request\n        const axiosInstance = axios.create({\n          baseURL: API_CONFIG.BASE_URL,\n          headers: {\n            'accept': 'application/json',\n            'content-type': 'application/json',\n            'x-api-key': config?.exaApiKey || process.env.EXA_API_KEY || '',\n            'x-exa-integration': 'people-search-mcp'\n          },\n          timeout: 25000\n        });\n\n        let searchQuery = query;\n        
searchQuery = `${query} profile`;\n\n        const searchRequest: ExaSearchRequest = {\n          query: searchQuery,\n          type: \"auto\",\n          numResults: numResults || API_CONFIG.DEFAULT_NUM_RESULTS,\n          category: \"people\",\n          contents: {\n            highlights: true,\n          },\n        };\n        \n        checkpoint('people_search_request_prepared');\n        logger.log(\"Sending request to Exa API for people search\");\n        \n        const response = await axiosInstance.post<ExaSearchResponse>(\n          API_CONFIG.ENDPOINTS.SEARCH,\n          searchRequest,\n          { timeout: 25000 }\n        );\n        \n        checkpoint('people_search_response_received');\n        logger.log(\"Received response from Exa API\");\n\n        if (!response.data || !response.data.results || response.data.results.length === 0) {\n          logger.log(\"Warning: Empty or invalid response from Exa API\");\n          checkpoint('people_search_complete');\n          return {\n            content: [{\n              type: \"text\" as const,\n              text: \"No content found. Please try a different query.\"\n            }]\n          };\n        }\n\n        logger.log(`Found ${response.data.results.length} results`);\n\n        const sanitized = sanitizeSearchResponse(response.data);\n        const results = Array.isArray(sanitized.results) ? sanitized.results : [];\n\n        const formattedResults = results.map((r) => {\n          const highlights = Array.isArray(r.highlights) ? r.highlights.join('\\n') : '';\n          const lines = [\n            `Title: ${r.title || 'N/A'}`,\n            `URL: ${r.url}`,\n            `Published: ${r.publishedDate || 'N/A'}`,\n            `Author: ${r.author || 'N/A'}`,\n            `Highlights:\\n${highlights}`,\n          ];\n          return lines.join('\\n');\n        }).join('\\n\\n---\\n\\n');\n\n        const searchTime = typeof sanitized.searchTime === 'number' ? 
sanitized.searchTime : undefined;\n        const header = searchTime != null ? `Search Time: ${searchTime}ms\\n\\n` : '';\n        \n        const result = {\n          content: [{\n            type: \"text\" as const,\n            text: header + formattedResults\n          }]\n        };\n        \n        checkpoint('people_search_complete');\n        logger.complete();\n        return result;\n      } catch (error) {\n        logger.error(error);\n        \n        // Check for rate limit error on free MCP\n        const rateLimitResult = handleRateLimitError(error, config?.userProvidedApiKey, 'people_search_exa');\n        if (rateLimitResult) {\n          return rateLimitResult;\n        }\n        \n        if (axios.isAxiosError(error)) {\n          // Handle Axios errors specifically\n          const statusCode = error.response?.status || 'unknown';\n          const errorMessage = error.response?.data?.message || error.message;\n          \n          logger.log(`Axios error (${statusCode}): ${errorMessage}`);\n          return {\n            content: [{\n              type: \"text\" as const,\n              text: `People search error (${statusCode}): ${errorMessage}`\n            }],\n            isError: true,\n          };\n        }\n        \n        // Handle generic errors\n        return {\n          content: [{\n            type: \"text\" as const,\n            text: `People search error: ${error instanceof Error ? error.message : String(error)}`\n          }],\n          isError: true,\n        };\n      }\n    }\n  );\n}\n"
  },
  {
    "path": "src/tools/webSearch.ts",
    "content": "import { z } from \"zod\";\nimport axios from \"axios\";\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { API_CONFIG } from \"./config.js\";\nimport { ExaSearchRequest, ExaSearchResponse } from \"../types.js\";\nimport { createRequestLogger } from \"../utils/logger.js\";\nimport { handleRateLimitError } from \"../utils/errorHandler.js\";\nimport { sanitizeSearchResponse } from \"../utils/exaResponseSanitizer.js\";\nimport { checkpoint } from \"agnost\"\n\nexport function registerWebSearchTool(server: McpServer, config?: { exaApiKey?: string; userProvidedApiKey?: boolean }): void {\n  server.tool(\n    \"web_search_exa\",\n    `Search the web for any topic and get clean, ready-to-use content.\n\nBest for: Finding current information, news, facts, or answering questions about any topic.\nReturns: Clean text content from top search results, ready for LLM use.`,\n    {\n      query: z.string().describe(\"Websearch query\"),\n      numResults: z.coerce.number().optional().describe(\"Number of search results to return (must be a number, default: 8)\"),\n      livecrawl: z.enum(['fallback', 'preferred']).optional().describe(\"Live crawl mode - 'fallback': use live crawling as backup if cached content unavailable, 'preferred': prioritize live crawling (default: 'fallback')\"),\n      type: z.enum(['auto', 'fast']).optional().describe(\"Search type - 'auto': balanced search (default), 'fast': quick results\"),\n      category: z.enum(['company', 'research paper', 'people']).optional().describe(\"Filter results to a specific category - 'company': company websites and profiles, 'research paper': academic papers and research, 'people': LinkedIn profiles and personal bios\"),\n    },\n    {\n      readOnlyHint: true,\n      destructiveHint: false,\n      idempotentHint: true\n    },\n    async ({ query, numResults, livecrawl, type, category }) => {\n      const requestId = 
`web_search_exa-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`;\n      const logger = createRequestLogger(requestId, 'web_search_exa');\n      \n      logger.start(query);\n      \n      try {\n        // Create a fresh axios instance for each request\n        const axiosInstance = axios.create({\n          baseURL: API_CONFIG.BASE_URL,\n          headers: {\n            'accept': 'application/json',\n            'content-type': 'application/json',\n            'x-api-key': config?.exaApiKey || process.env.EXA_API_KEY || '',\n            'x-exa-integration': 'web-search-mcp'\n          },\n          timeout: 25000\n        });\n\n        const searchRequest: ExaSearchRequest = {\n          query,\n          type: type || \"auto\",\n          numResults: numResults || API_CONFIG.DEFAULT_NUM_RESULTS,\n          ...(category && { category }),\n          contents: {\n            highlights: true,\n            livecrawl: livecrawl || 'fallback'\n          }\n        };\n        \n        checkpoint('web_search_request_prepared');\n        logger.log(\"Sending request to Exa API\");\n        \n        const response = await axiosInstance.post<ExaSearchResponse>(\n          API_CONFIG.ENDPOINTS.SEARCH,\n          searchRequest,\n          { timeout: 25000 }\n        );\n        \n        checkpoint('exa_search_response_received');\n        logger.log(\"Received response from Exa API\");\n\n        if (!response.data || !response.data.results || response.data.results.length === 0) {\n          logger.log(\"Warning: Empty or invalid response from Exa API\");\n          checkpoint('web_search_complete');\n          return {\n            content: [{\n              type: \"text\" as const,\n              text: \"No search results found. 
Please try a different query.\"\n            }]\n          };\n        }\n\n        logger.log(`Received ${response.data.results.length} results with highlights`);\n\n        const sanitized = sanitizeSearchResponse(response.data);\n        const results = Array.isArray(sanitized.results) ? sanitized.results : [];\n\n        const formattedResults = results.map((r) => {\n          const highlights = Array.isArray(r.highlights) ? r.highlights.join('\\n') : '';\n          const lines = [\n            `Title: ${r.title || 'N/A'}`,\n            `URL: ${r.url}`,\n            `Published: ${r.publishedDate || 'N/A'}`,\n            `Author: ${r.author || 'N/A'}`,\n            `Highlights:\\n${highlights}`,\n          ];\n          return lines.join('\\n');\n        }).join('\\n\\n---\\n\\n');\n\n        const searchTime = typeof sanitized.searchTime === 'number' ? sanitized.searchTime : undefined;\n        const header = searchTime != null ? `Search Time: ${searchTime}ms\\n\\n` : '';\n        \n        const result = {\n          content: [{\n            type: \"text\" as const,\n            text: header + formattedResults\n          }]\n        };\n        \n        checkpoint('web_search_complete');\n        logger.complete();\n        return result;\n      } catch (error) {\n        logger.error(error);\n        \n        // Check for rate limit error on free MCP\n        const rateLimitResult = handleRateLimitError(error, config?.userProvidedApiKey, 'web_search_exa');\n        if (rateLimitResult) {\n          return rateLimitResult;\n        }\n        \n        if (axios.isAxiosError(error)) {\n          // Handle Axios errors specifically\n          const statusCode = error.response?.status || 'unknown';\n          const errorMessage = error.response?.data?.message || error.message;\n          \n          logger.log(`Axios error (${statusCode}): ${errorMessage}`);\n          return {\n            content: [{\n              type: \"text\" as const,\n              
text: `Search error (${statusCode}): ${errorMessage}`\n            }],\n            isError: true,\n          };\n        }\n        \n        // Handle generic errors\n        return {\n          content: [{\n            type: \"text\" as const,\n            text: `Search error: ${error instanceof Error ? error.message : String(error)}`\n          }],\n          isError: true,\n        };\n      }\n    }\n  );\n}                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                               
                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                
                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                
                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                
                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                
                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                 "
  },
  {
    "path": "src/tools/webSearchAdvanced.ts",
    "content": "import { z } from \"zod\";\nimport axios from \"axios\";\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { API_CONFIG } from \"./config.js\";\nimport { ExaAdvancedSearchRequest, ExaSearchResponse } from \"../types.js\";\nimport { createRequestLogger } from \"../utils/logger.js\";\nimport { handleRateLimitError } from \"../utils/errorHandler.js\";\nimport { sanitizeSearchResponse } from \"../utils/exaResponseSanitizer.js\";\nimport { checkpoint } from \"agnost\";\n\nexport function registerWebSearchAdvancedTool(server: McpServer, config?: { exaApiKey?: string; userProvidedApiKey?: boolean }): void {\n  server.tool(\n    \"web_search_advanced_exa\",\n    `Advanced web search with full control over filters, domains, dates, and content options.\n\nBest for: When you need specific filters like date ranges, domain restrictions, or category filters.\nNot recommended for: Simple searches - use web_search_exa instead.\nReturns: Search results with optional highlights, summaries, and subpage content.`,\n    {\n      query: z.string().describe(\"Search query - can be a question, statement, or keywords\"),\n      numResults: z.coerce.number().optional().describe(\"Number of results (must be a number, 1-100, default: 10)\"),\n      type: z.enum(['auto', 'fast', 'neural']).optional().describe(\"Search type - 'auto': balanced (default), 'fast': quick results, 'neural': semantic search\"),\n\n      category: z.enum(['company', 'research paper', 'news', 'pdf', 'github', 'tweet', 'personal site', 'people', 'financial report']).optional().describe(\"Filter results to a specific category\"),\n\n      includeDomains: z.array(z.string()).optional().describe(\"Only include results from these domains (e.g., ['arxiv.org', 'github.com'])\"),\n      excludeDomains: z.array(z.string()).optional().describe(\"Exclude results from these domains\"),\n\n      startPublishedDate: z.string().optional().describe(\"Only include results published after 
this date (ISO 8601: YYYY-MM-DD)\"),\n      endPublishedDate: z.string().optional().describe(\"Only include results published before this date (ISO 8601: YYYY-MM-DD)\"),\n      startCrawlDate: z.string().optional().describe(\"Only include results crawled after this date (ISO 8601: YYYY-MM-DD)\"),\n      endCrawlDate: z.string().optional().describe(\"Only include results crawled before this date (ISO 8601: YYYY-MM-DD)\"),\n\n      includeText: z.array(z.string()).optional().describe(\"Only include results containing ALL of these text strings\"),\n      excludeText: z.array(z.string()).optional().describe(\"Exclude results containing ANY of these text strings\"),\n\n      userLocation: z.string().optional().describe(\"ISO country code for geo-targeted results (e.g., 'US', 'GB', 'DE')\"),\n\n      moderation: z.boolean().optional().describe(\"Filter out unsafe/inappropriate content\"),\n\n      additionalQueries: z.array(z.string()).optional().describe(\"Additional query variations to expand search coverage\"),\n\n      textMaxCharacters: z.coerce.number().optional().describe(\"Max characters for text extraction per result (must be a number)\"),\n      contextMaxCharacters: z.coerce.number().optional().describe(\"Max characters for context string (must be a number, not included by default)\"),\n\n      enableSummary: z.boolean().optional().describe(\"Enable summary generation for results\"),\n      summaryQuery: z.string().optional().describe(\"Focus query for summary generation\"),\n\n      enableHighlights: z.boolean().optional().describe(\"Enable highlights extraction\"),\n      highlightsNumSentences: z.coerce.number().optional().describe(\"Number of sentences per highlight (must be a number)\"),\n      highlightsPerUrl: z.coerce.number().optional().describe(\"Number of highlights per URL (must be a number)\"),\n      highlightsQuery: z.string().optional().describe(\"Query for highlight relevance\"),\n\n      livecrawl: z.enum(['never', 'fallback', 'always', 
'preferred']).optional().describe(\"Live crawl mode - 'never': only cached, 'fallback': cached then live, 'always': always live, 'preferred': prefer live (default: 'fallback')\"),\n      livecrawlTimeout: z.coerce.number().optional().describe(\"Timeout for live crawl in milliseconds (must be a number)\"),\n\n      subpages: z.coerce.number().optional().describe(\"Number of subpages to crawl from each result (must be a number, 1-10)\"),\n      subpageTarget: z.array(z.string()).optional().describe(\"Keywords to target when selecting subpages\"),\n    },\n    {\n      readOnlyHint: true,\n      destructiveHint: false,\n      idempotentHint: true\n    },\n    async (params) => {\n      const requestId = `web_search_advanced_exa-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`;\n      const logger = createRequestLogger(requestId, 'web_search_advanced_exa');\n\n      logger.start(params.query);\n\n      try {\n        const axiosInstance = axios.create({\n          baseURL: API_CONFIG.BASE_URL,\n          headers: {\n            'accept': 'application/json',\n            'content-type': 'application/json',\n            'x-api-key': config?.exaApiKey || process.env.EXA_API_KEY || '',\n            'x-exa-integration': 'web-search-advanced-mcp'\n          },\n          timeout: params.livecrawlTimeout || 30000\n        });\n\n        const contents: ExaAdvancedSearchRequest['contents'] = {\n          text: params.textMaxCharacters ? { maxCharacters: params.textMaxCharacters } : true,\n          livecrawl: params.livecrawl || 'fallback',\n        };\n\n        if (params.contextMaxCharacters) {\n          contents.context = { maxCharacters: params.contextMaxCharacters };\n        }\n\n        if (params.livecrawlTimeout) {\n          contents.livecrawlTimeout = params.livecrawlTimeout;\n        }\n\n        if (params.enableSummary) {\n          contents.summary = params.summaryQuery ? 
{ query: params.summaryQuery } : true;\n        }\n\n        if (params.enableHighlights) {\n          contents.highlights = {\n            numSentences: params.highlightsNumSentences,\n            highlightsPerUrl: params.highlightsPerUrl,\n            query: params.highlightsQuery,\n          };\n        }\n\n        if (params.subpages) {\n          contents.subpages = params.subpages;\n        }\n\n        if (params.subpageTarget) {\n          contents.subpageTarget = params.subpageTarget;\n        }\n\n        const searchRequest: ExaAdvancedSearchRequest = {\n          query: params.query,\n          type: params.type || 'auto',\n          numResults: params.numResults || 10,\n          contents,\n        };\n\n        if (params.category) {\n          searchRequest.category = params.category;\n        }\n\n        if (params.includeDomains && params.includeDomains.length > 0) {\n          searchRequest.includeDomains = params.includeDomains;\n        }\n\n        if (params.excludeDomains && params.excludeDomains.length > 0) {\n          searchRequest.excludeDomains = params.excludeDomains;\n        }\n\n        if (params.startPublishedDate) {\n          searchRequest.startPublishedDate = params.startPublishedDate;\n        }\n\n        if (params.endPublishedDate) {\n          searchRequest.endPublishedDate = params.endPublishedDate;\n        }\n\n        if (params.startCrawlDate) {\n          searchRequest.startCrawlDate = params.startCrawlDate;\n        }\n\n        if (params.endCrawlDate) {\n          searchRequest.endCrawlDate = params.endCrawlDate;\n        }\n\n        if (params.includeText && params.includeText.length > 0) {\n          searchRequest.includeText = params.includeText;\n        }\n\n        if (params.excludeText && params.excludeText.length > 0) {\n          searchRequest.excludeText = params.excludeText;\n        }\n\n        if (params.userLocation) {\n          searchRequest.userLocation = params.userLocation;\n        }\n\n    
    if (params.moderation !== undefined) {\n          searchRequest.moderation = params.moderation;\n        }\n\n        if (params.additionalQueries && params.additionalQueries.length > 0) {\n          searchRequest.additionalQueries = params.additionalQueries;\n        }\n\n        checkpoint('web_search_advanced_request_prepared');\n        logger.log(\"Sending advanced search request to Exa API\");\n\n        const response = await axiosInstance.post<ExaSearchResponse>(\n          API_CONFIG.ENDPOINTS.SEARCH,\n          searchRequest,\n          { timeout: params.livecrawlTimeout || 30000 }\n        );\n\n        checkpoint('exa_advanced_search_response_received');\n        logger.log(\"Received response from Exa API\");\n\n        if (!response.data) {\n          logger.log(\"Warning: Empty response from Exa API\");\n          checkpoint('web_search_advanced_complete');\n          return {\n            content: [{\n              type: \"text\" as const,\n              text: \"No search results found. 
Please try a different query or adjust your filters.\"\n            }]\n          };\n        }\n\n        const resultText = JSON.stringify(sanitizeSearchResponse(response.data));\n        logger.log(`Response prepared with ${resultText.length} characters`);\n\n        const result = {\n          content: [{\n            type: \"text\" as const,\n            text: resultText\n          }]\n        };\n\n        checkpoint('web_search_advanced_complete');\n        logger.complete();\n        return result;\n      } catch (error) {\n        logger.error(error);\n\n        // Check for rate limit error on free MCP\n        const rateLimitResult = handleRateLimitError(error, config?.userProvidedApiKey, 'web_search_advanced_exa');\n        if (rateLimitResult) {\n          return rateLimitResult;\n        }\n\n        if (axios.isAxiosError(error)) {\n          const statusCode = error.response?.status || 'unknown';\n          const errorMessage = error.response?.data?.message || error.message;\n\n          logger.log(`Axios error (${statusCode}): ${errorMessage}`);\n          return {\n            content: [{\n              type: \"text\" as const,\n              text: `Advanced search error (${statusCode}): ${errorMessage}`\n            }],\n            isError: true,\n          };\n        }\n\n        return {\n          content: [{\n            type: \"text\" as const,\n            text: `Advanced search error: ${error instanceof Error ? error.message : String(error)}`\n          }],\n          isError: true,\n        };\n      }\n    }\n  );\n}\n"
  },
  {
    "path": "src/types.ts",
    "content": "// Exa API Types\nexport interface ExaSearchRequest {\n  query: string;\n  type: 'auto' | 'fast' | 'deep' | 'deep-reasoning';\n  category?: 'company' | 'research paper' | 'news' | 'pdf' | 'github' | 'tweet' | 'personal site' | 'people' | 'financial report';\n  includeDomains?: string[];\n  excludeDomains?: string[];\n  startPublishedDate?: string;\n  endPublishedDate?: string;\n  numResults?: number;\n  additionalQueries?: string[];\n  outputSchema?: Record<string, unknown>;\n  contents: {\n    text?: {\n      maxCharacters?: number;\n    } | boolean;\n    highlights?: {\n      maxCharacters?: number;\n      numSentences?: number;\n      highlightsPerUrl?: number;\n      query?: string;\n    } | boolean;\n    summary?: {\n      query?: string;\n    } | boolean;\n    livecrawl?: 'fallback' | 'preferred';\n    subpages?: number;\n    subpageTarget?: string[];\n  };\n}\n\nexport interface ExaAdvancedSearchRequest {\n  query: string;\n  type: 'auto' | 'fast' | 'neural';\n  numResults?: number;\n  category?: 'company' | 'research paper' | 'news' | 'pdf' | 'github' | 'tweet' | 'personal site' | 'people' | 'financial report';\n  includeDomains?: string[];\n  excludeDomains?: string[];\n  startPublishedDate?: string;\n  endPublishedDate?: string;\n  startCrawlDate?: string;\n  endCrawlDate?: string;\n  includeText?: string[];\n  excludeText?: string[];\n  userLocation?: string;\n  moderation?: boolean;\n  additionalQueries?: string[];\n  contents: {\n    text?: {\n      maxCharacters?: number;\n    } | boolean;\n    context?: {\n      maxCharacters?: number;\n    } | boolean;\n    summary?: {\n      query?: string;\n    } | boolean;\n    highlights?: {\n      numSentences?: number;\n      highlightsPerUrl?: number;\n      query?: string;\n    };\n    livecrawl?: 'never' | 'fallback' | 'always' | 'preferred';\n    livecrawlTimeout?: number;\n    subpages?: number;\n    subpageTarget?: string[];\n  };\n}\n\nexport interface ExaSearchResult {\n  id?: string;\n 
 title?: string | null;\n  url?: string;\n  publishedDate?: string;\n  author?: string;\n  text?: string;\n  summary?: string;\n  highlights?: string[];\n  highlightScores?: number[];\n  image?: string;\n  favicon?: string;\n  score?: number;\n  entities?: Record<string, unknown>[];\n  extras?: {\n    links?: string[];\n    imageLinks?: string[];\n  };\n  subpages?: ExaSearchResult[];\n}\n\nexport interface ExaSearchStatus {\n  id: string;\n  status: string;\n  source: string;\n}\n\nexport interface ExaCostDollars {\n  total: number;\n  search?: Record<string, number>;\n  contents?: Record<string, number>;\n}\n\nexport interface ExaSearchResponse {\n  requestId: string;\n  autopromptString?: string;\n  autoDate?: string;\n  resolvedSearchType: string;\n  context?: string;\n  output?: {\n    content: string | Record<string, unknown>;\n    grounding?: Array<{\n      field: string;\n      citations: Array<{\n        url: string;\n        title: string;\n      }>;\n      confidence: string;\n    }>;\n  };\n  statuses?: ExaSearchStatus[];\n  results: ExaSearchResult[];\n  searchTime?: number;\n  costDollars?: ExaCostDollars;\n}\n\n// Deep Search API Types\nexport interface ExaDeepSearchRequest {\n  query: string;\n  type: 'deep' | 'deep-reasoning';\n  numResults?: number;\n  additionalQueries?: string[];\n  outputSchema?: Record<string, unknown>;\n  systemPrompt?: string;\n  contents: {\n    highlights?: {\n      maxCharacters?: number;\n      numSentences?: number;\n      highlightsPerUrl?: number;\n      query?: string;\n    };\n  };\n}\n\nexport interface ExaDeepSearchResponse {\n  requestId: string;\n  autopromptString?: string;\n  autoDate?: string;\n  resolvedSearchType: string;\n  output?: {\n    content: string | Record<string, unknown>;\n    grounding?: Array<{\n      field: string;\n      citations: Array<{\n        url: string;\n        title: string;\n      }>;\n      confidence: string;\n    }>;\n  };\n  statuses?: ExaSearchStatus[];\n  results: 
ExaSearchResult[];\n  searchTime?: number;\n  costDollars?: ExaCostDollars;\n}\n\nexport interface ExaContentsResponse {\n  requestId?: string;\n  results?: ExaSearchResult[];\n  statuses?: ExaSearchStatus[];\n  costDollars?: ExaCostDollars;\n}\n\n// Deep Research API Types (v1)\nexport interface DeepResearchRequest {\n  model: 'exa-research-fast' | 'exa-research' | 'exa-research-pro';\n  instructions: string;\n  outputSchema?: Record<string, unknown>;\n}\n\nexport interface DeepResearchStartResponse {\n  researchId: string;\n  createdAt: number;\n  model: string;\n  instructions: string;\n  outputSchema?: Record<string, unknown>;\n  status: string;\n}\n\nexport interface DeepResearchCheckResponse {\n  researchId: string;\n  createdAt: number;\n  model: string;\n  instructions: string;\n  outputSchema?: Record<string, unknown>;\n  finishedAt?: number;\n  status: 'pending' | 'running' | 'completed' | 'canceled' | 'failed';\n  output?: {\n    content: string;\n    parsed?: Record<string, unknown>;\n  };\n  citations?: Array<{\n    id: string;\n    url: string;\n    title: string;\n  }>;\n  costDollars?: {\n    total: number;\n    numSearches: number;\n    numPages: number;\n    reasoningTokens: number;\n  };\n}\n\nexport interface DeepResearchErrorResponse {\n  response: {\n    message: string;\n    error: string;\n    statusCode: number;\n  };\n  status: number;\n  options: any;\n  message: string;\n  name: string;\n}\n\n// Exa Code API Types\nexport interface ExaCodeRequest {\n  query: string;\n  tokensNum: number;\n  flags?: string[];\n}\n\nexport interface ExaCodeResponse {\n  requestId: string;\n  query: string;\n  repository?: string;\n  response: string;\n  resultsCount: number;\n  costDollars: string;\n  searchTime: number;\n  outputTokens?: number;\n  traces?: any;\n}\n"
  },
  {
    "path": "src/utils/errorHandler.ts",
    "content": "/**\n * Error handling utilities for Exa MCP server.\n * Provides rate limit detection and user-friendly error messages for free MCP users.\n */\nimport axios from \"axios\";\n\nconst FREE_MCP_RATE_LIMIT_MESSAGE = `You've hit Exa's free MCP rate limit. To continue using without limits, create your own Exa API key.\n\nFix: Create API key at https://dashboard.exa.ai/api-keys , and then update Exa MCP URL to this https://mcp.exa.ai/mcp?exaApiKey=YOUR_EXA_API_KEY`;\n\n/**\n * Checks if an Axios error is a rate limit error (HTTP 429) and if the user is using the free MCP.\n * Returns a user-friendly error message if both conditions are met.\n * \n * @param error - The error to check\n * @param userProvidedApiKey - Whether the user provided their own API key via URL parameter\n * @param toolName - The name of the tool that encountered the error (for logging)\n */\nexport function handleRateLimitError(\n  error: unknown,\n  userProvidedApiKey: boolean | undefined,\n  toolName: string\n): { content: Array<{ type: \"text\"; text: string }>; isError: true } | null {\n  if (!axios.isAxiosError(error)) {\n    return null;\n  }\n\n  const statusCode = error.response?.status;\n  const isRateLimited = statusCode === 429;\n  const isUsingFreeMcp = !userProvidedApiKey;\n\n  if (isRateLimited && isUsingFreeMcp) {\n    return {\n      content: [\n        {\n          type: \"text\" as const,\n          text: FREE_MCP_RATE_LIMIT_MESSAGE,\n        },\n      ],\n      isError: true,\n    };\n  }\n\n  return null;\n}\n"
  },
  {
    "path": "src/utils/exaResponseSanitizer.ts",
    "content": "import { ExaDeepSearchResponse, ExaSearchResponse } from \"../types.js\";\n\nconst SENSITIVE_RESPONSE_KEYS = new Set([\"requestTags\"]);\n\nfunction isRecord(value: unknown): value is Record<string, unknown> {\n  return typeof value === \"object\" && value !== null && !Array.isArray(value);\n}\n\nfunction sanitizeStringArray(value: unknown): string[] | undefined {\n  if (!Array.isArray(value)) {\n    return undefined;\n  }\n\n  const sanitized = value.filter((item): item is string => typeof item === \"string\");\n  return sanitized.length > 0 ? sanitized : undefined;\n}\n\nfunction sanitizeNumberArray(value: unknown): number[] | undefined {\n  if (!Array.isArray(value)) {\n    return undefined;\n  }\n\n  const sanitized = value.filter((item): item is number => typeof item === \"number\");\n  return sanitized.length > 0 ? sanitized : undefined;\n}\n\nfunction sanitizeObjectArray(value: unknown): Record<string, unknown>[] | undefined {\n  if (!Array.isArray(value)) {\n    return undefined;\n  }\n\n  const sanitized = value\n    .map((item) => stripSensitiveKeys(item))\n    .filter((item): item is Record<string, unknown> => isRecord(item));\n\n  return sanitized.length > 0 ? sanitized : undefined;\n}\n\nfunction sanitizeStatuses(value: unknown): Array<{ id: string; status: string; source: string }> | undefined {\n  if (!Array.isArray(value)) {\n    return undefined;\n  }\n\n  const sanitized = value\n    .map((status) => {\n      if (!isRecord(status)) {\n        return null;\n      }\n\n      const { id, status: state, source } = status;\n      if (typeof id !== \"string\" || typeof state !== \"string\" || typeof source !== \"string\") {\n        return null;\n      }\n\n      return { id, status: state, source };\n    })\n    .filter((status): status is { id: string; status: string; source: string } => status !== null);\n\n  return sanitized.length > 0 ? 
sanitized : undefined;\n}\n\nfunction sanitizeExtras(value: unknown): { links?: string[]; imageLinks?: string[] } | undefined {\n  if (!isRecord(value)) {\n    return undefined;\n  }\n\n  const sanitized: { links?: string[]; imageLinks?: string[] } = {};\n\n  const links = sanitizeStringArray(value.links);\n  if (links) {\n    sanitized.links = links;\n  }\n\n  const imageLinks = sanitizeStringArray(value.imageLinks);\n  if (imageLinks) {\n    sanitized.imageLinks = imageLinks;\n  }\n\n  return Object.keys(sanitized).length > 0 ? sanitized : undefined;\n}\n\nfunction sanitizeSearchOutput(value: unknown): Record<string, unknown> | undefined {\n  if (!isRecord(value)) {\n    return undefined;\n  }\n\n  const sanitized: Record<string, unknown> = {};\n\n  if (\"content\" in value) {\n    sanitized.content = stripSensitiveKeys(value.content);\n  }\n\n  if (Array.isArray(value.grounding)) {\n    const grounding = value.grounding\n      .map((entry) => {\n        if (!isRecord(entry)) {\n          return null;\n        }\n\n        const citations = Array.isArray(entry.citations)\n          ? 
entry.citations\n              .map((citation) => {\n                if (!isRecord(citation)) {\n                  return null;\n                }\n\n                const { url, title } = citation;\n                if (typeof url !== \"string\" || typeof title !== \"string\") {\n                  return null;\n                }\n\n                return { url, title };\n              })\n              .filter((citation): citation is { url: string; title: string } => citation !== null)\n          : [];\n\n        const result: Record<string, unknown> = { citations };\n\n        if (typeof entry.field === \"string\") {\n          result.field = entry.field;\n        }\n\n        if (typeof entry.confidence === \"string\") {\n          result.confidence = entry.confidence;\n        }\n\n        return result;\n      })\n      .filter((entry): entry is Record<string, unknown> => entry !== null);\n\n    if (grounding.length > 0) {\n      sanitized.grounding = grounding;\n    }\n  }\n\n  return Object.keys(sanitized).length > 0 ? 
sanitized : undefined;\n}\n\nexport function stripSensitiveKeys(value: unknown): unknown {\n  if (Array.isArray(value)) {\n    return value.map((item) => stripSensitiveKeys(item));\n  }\n\n  if (!isRecord(value)) {\n    return value;\n  }\n\n  const sanitized: Record<string, unknown> = {};\n\n  for (const [key, nestedValue] of Object.entries(value)) {\n    if (SENSITIVE_RESPONSE_KEYS.has(key)) {\n      continue;\n    }\n\n    sanitized[key] = stripSensitiveKeys(nestedValue);\n  }\n\n  return sanitized;\n}\n\nexport function sanitizeSearchResult(value: unknown): Record<string, unknown> | null {\n  if (!isRecord(value)) {\n    return null;\n  }\n\n  const sanitized: Record<string, unknown> = {};\n\n  const stringFields = [\"id\", \"url\", \"publishedDate\", \"author\", \"text\", \"summary\", \"image\", \"favicon\"] as const;\n  for (const field of stringFields) {\n    if (typeof value[field] === \"string\") {\n      sanitized[field] = value[field];\n    }\n  }\n\n  if (typeof value.title === \"string\" || value.title === null) {\n    sanitized.title = value.title;\n  }\n\n  if (typeof value.score === \"number\") {\n    sanitized.score = value.score;\n  }\n\n  const highlights = sanitizeStringArray(value.highlights);\n  if (highlights) {\n    sanitized.highlights = highlights;\n  }\n\n  const highlightScores = sanitizeNumberArray(value.highlightScores);\n  if (highlightScores) {\n    sanitized.highlightScores = highlightScores;\n  }\n\n  const entities = sanitizeObjectArray(value.entities);\n  if (entities) {\n    sanitized.entities = entities;\n  }\n\n  const extras = sanitizeExtras(value.extras);\n  if (extras) {\n    sanitized.extras = extras;\n  }\n\n  if (Array.isArray(value.subpages)) {\n    const subpages = value.subpages\n      .map((subpage) => sanitizeSearchResult(subpage))\n      .filter((subpage): subpage is Record<string, unknown> => subpage !== null);\n\n    if (subpages.length > 0) {\n      sanitized.subpages = subpages;\n    }\n  }\n\n  return 
sanitized;\n}\n\nfunction sanitizeSearchResults(value: unknown): Record<string, unknown>[] | undefined {\n  if (!Array.isArray(value)) {\n    return undefined;\n  }\n\n  const sanitized = value\n    .map((result) => sanitizeSearchResult(result))\n    .filter((result): result is Record<string, unknown> => result !== null);\n\n  return sanitized.length > 0 ? sanitized : undefined;\n}\n\nfunction sanitizeTopLevelResponse(value: unknown): Record<string, unknown> {\n  if (!isRecord(value)) {\n    return {};\n  }\n\n  const sanitized: Record<string, unknown> = {};\n\n  if (typeof value.requestId === \"string\") {\n    sanitized.requestId = value.requestId;\n  }\n\n  if (typeof value.autopromptString === \"string\") {\n    sanitized.autopromptString = value.autopromptString;\n  }\n\n  if (typeof value.autoDate === \"string\") {\n    sanitized.autoDate = value.autoDate;\n  }\n\n  if (typeof value.resolvedSearchType === \"string\") {\n    sanitized.resolvedSearchType = value.resolvedSearchType;\n  }\n\n  if (typeof value.context === \"string\") {\n    sanitized.context = value.context;\n  }\n\n  const output = sanitizeSearchOutput(value.output);\n  if (output) {\n    sanitized.output = output;\n  }\n\n  const statuses = sanitizeStatuses(value.statuses);\n  if (statuses) {\n    sanitized.statuses = statuses;\n  }\n\n  const results = sanitizeSearchResults(value.results);\n  if (results) {\n    sanitized.results = results;\n  }\n\n  if (typeof value.searchTime === \"number\") {\n    sanitized.searchTime = value.searchTime;\n  }\n\n  const costDollars = stripSensitiveKeys(value.costDollars);\n  if (isRecord(costDollars)) {\n    sanitized.costDollars = costDollars;\n  }\n\n  return sanitized;\n}\n\nexport function sanitizeSearchResponse(response: ExaSearchResponse | unknown): Record<string, unknown> {\n  return sanitizeTopLevelResponse(response);\n}\n\nexport function sanitizeDeepSearchStructuredResponse(response: ExaDeepSearchResponse | unknown): Record<string, unknown> {\n  
const sanitized = sanitizeTopLevelResponse(response);\n  const structured: Record<string, unknown> = {};\n\n  if (\"output\" in sanitized && isRecord(sanitized.output)) {\n    const output = { ...sanitized.output };\n    if (isRecord(response) && isRecord((response as Record<string, unknown>).output)) {\n      output.content = ((response as Record<string, unknown>).output as Record<string, unknown>).content;\n    }\n    structured.output = output;\n  }\n\n  if (\"results\" in sanitized) {\n    structured.results = sanitized.results;\n  }\n\n  if (\"searchTime\" in sanitized) {\n    structured.searchTime = sanitized.searchTime;\n  }\n\n  if (\"costDollars\" in sanitized) {\n    structured.costDollars = sanitized.costDollars;\n  }\n\n  return structured;\n}\n\nexport function sanitizeContentsResponse(response: unknown): Record<string, unknown> {\n  return sanitizeTopLevelResponse(response);\n}\n"
  },
  {
    "path": "src/utils/logger.ts",
    "content": "/**\n * Simple logging utility for MCP server\n */\nexport const log = (message: string): void => {\n  console.error(`[EXA-MCP-DEBUG] ${message}`);\n};\n\nexport const createRequestLogger = (requestId: string, toolName: string) => {\n  return {\n    log: (message: string): void => {\n      log(`[${requestId}] [${toolName}] ${message}`);\n    },\n    start: (query: string): void => {\n      log(`[${requestId}] [${toolName}] Starting search for query: \"${query}\"`);\n    },\n    error: (error: unknown): void => {\n      log(`[${requestId}] [${toolName}] Error: ${error instanceof Error ? error.message : String(error)}`);\n    },\n    complete: (): void => {\n      log(`[${requestId}] [${toolName}] Successfully completed request`);\n    }\n  };\n}; "
  },
  {
    "path": "tsconfig.json",
    "content": "{\n  \"compilerOptions\": {\n    \"target\": \"ES2022\",\n    \"module\": \"Node16\",\n    \"moduleResolution\": \"Node16\",\n    \"outDir\": \"./dist\",\n    \"rootDir\": \"./\",\n    \"strict\": true,\n    \"esModuleInterop\": true,\n    \"skipLibCheck\": true,\n    \"forceConsistentCasingInFileNames\": true,\n    \"declaration\": true,\n    \"declarationMap\": true,\n    \"resolveJsonModule\": true,\n    \"allowJs\": true\n  },\n  \"include\": [\"src/**/*\", \"api/**/*\"],\n  \"exclude\": [\"node_modules\", \".smithery\", \"build\", \"dist\"]\n}\n"
  },
  {
    "path": "vercel.json",
    "content": "{\n  \"functions\": {\n    \"api/mcp.ts\": {\n      \"maxDuration\": 60,\n      \"memory\": 1024\n    }\n  },\n  \"rewrites\": [\n    {\n      \"source\": \"/\",\n      \"destination\": \"/api/mcp\"\n    },\n    {\n      \"source\": \"/mcp\",\n      \"destination\": \"/api/mcp\"\n    },\n    {\n      \"source\": \"/.well-known/mcp-config\",\n      \"destination\": \"/api/well-known-mcp-config\"\n    }\n  ],\n  \"buildCommand\": \"npm run build:vercel\",\n  \"outputDirectory\": \"dist\"\n}\n\n"
  }
]