[
  {
    "path": ".gitattributes",
    "content": "package-lock.json linguist-generated=true\n"
  },
  {
    "path": ".github/pull_request_template.md",
    "content": "<!-- Provide a brief description of your changes -->\n\n## Description\n\n## Publishing Your Server\n\n**Note: We are no longer accepting PRs to add servers to the README.** Instead, please publish your server to the [MCP Server Registry](https://github.com/modelcontextprotocol/registry) to make it discoverable to the MCP ecosystem.\n\nTo publish your server, follow the [quickstart guide](https://github.com/modelcontextprotocol/registry/blob/main/docs/modelcontextprotocol-io/quickstart.mdx). You can browse published servers at [https://registry.modelcontextprotocol.io/](https://registry.modelcontextprotocol.io/).\n\n## Server Details\n<!-- If modifying an existing server, provide details -->\n- Server: <!-- e.g., filesystem, github -->\n- Changes to: <!-- e.g., tools, resources, prompts -->\n\n## Motivation and Context\n<!-- Why is this change needed? What problem does it solve? -->\n\n## How Has This Been Tested?\n<!-- Have you tested this with an LLM client? Which scenarios were tested? -->\n\n## Breaking Changes\n<!-- Will users need to update their MCP client configurations? -->\n\n## Types of changes\n<!-- What types of changes does your code introduce? Put an `x` in all the boxes that apply: -->\n- [ ] Bug fix (non-breaking change which fixes an issue)\n- [ ] New feature (non-breaking change which adds functionality)\n- [ ] Breaking change (fix or feature that would cause existing functionality to change)\n- [ ] Documentation update\n\n## Checklist\n<!-- Go over all the following points, and put an `x` in all the boxes that apply. 
-->\n- [ ] I have read the [MCP Protocol Documentation](https://modelcontextprotocol.io)\n- [ ] My changes follows MCP security best practices\n- [ ] I have updated the server's README accordingly\n- [ ] I have tested this with an LLM client\n- [ ] My code follows the repository's style guidelines\n- [ ] New and existing tests pass locally\n- [ ] I have added appropriate error handling\n- [ ] I have documented all environment variables and configuration options\n\n## Additional context\n<!-- Add any other context, implementation notes, or design decisions -->\n"
  },
  {
    "path": ".github/workflows/claude.yml",
    "content": "name: Claude Code\n\non:\n  issue_comment:\n    types: [created]\n  pull_request_review_comment:\n    types: [created]\n  issues:\n    types: [opened, assigned]\n  pull_request_review:\n    types: [submitted]\n\njobs:\n  claude:\n    if: |\n      (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||\n      (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||\n      (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||\n      (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))\n    runs-on: ubuntu-latest\n    permissions:\n      contents: read\n      pull-requests: read\n      issues: read\n      id-token: write\n      actions: read\n    steps:\n      - name: Checkout repository\n        uses: actions/checkout@v6\n        with:\n          fetch-depth: 1\n\n      - name: Run Claude Code\n        id: claude\n        uses: anthropics/claude-code-action@v1\n        with:\n          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}\n\n          # Allow Claude to read CI results on PRs\n          additional_permissions: |\n            actions: read\n\n          # Trigger when assigned to an issue\n          assignee_trigger: \"claude\"\n\n          claude_args: |\n            --mcp-config .mcp.json\n            --allowedTools \"Bash,mcp__mcp-docs,WebFetch\"\n            --append-system-prompt \"If posting a comment to GitHub, give a concise summary of the comment at the top and put all the details in a <details> block. When working on MCP-related code or reviewing MCP-related changes, use the mcp-docs MCP server to look up the latest protocol documentation. 
For schema details, reference https://github.com/modelcontextprotocol/modelcontextprotocol/tree/main/schema which contains versioned schemas in JSON (schema.json) and TypeScript (schema.ts) formats.\"\n"
  },
  {
    "path": ".github/workflows/python.yml",
    "content": "name: Python\n\non:\n  push:\n    branches:\n      - main\n  pull_request:\n  release:\n    types: [published]\n\njobs:\n  detect-packages:\n    runs-on: ubuntu-latest\n    outputs:\n      packages: ${{ steps.find-packages.outputs.packages }}\n    steps:\n      - uses: actions/checkout@v6\n\n      - name: Find Python packages\n        id: find-packages\n        working-directory: src\n        run: |\n          PACKAGES=$(find . -name pyproject.toml -exec dirname {} \\; | sed 's/^\\.\\///' | jq -R -s -c 'split(\"\\n\")[:-1]')\n          echo \"packages=$PACKAGES\" >> $GITHUB_OUTPUT\n\n  test:\n    needs: [detect-packages]\n    strategy:\n      matrix:\n        package: ${{ fromJson(needs.detect-packages.outputs.packages) }}\n    name: Test ${{ matrix.package }}\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v6\n\n      - name: Install uv\n        uses: astral-sh/setup-uv@v3\n\n      - name: Set up Python\n        uses: actions/setup-python@v6\n        with:\n          python-version-file: \"src/${{ matrix.package }}/.python-version\"\n\n      - name: Install dependencies\n        working-directory: src/${{ matrix.package }}\n        run: uv sync --frozen --all-extras --dev\n\n      - name: Check if tests exist\n        id: check-tests\n        working-directory: src/${{ matrix.package }}\n        run: |\n          if [ -d \"tests\" ] || [ -d \"test\" ] || grep -q \"pytest\" pyproject.toml; then\n            echo \"has-tests=true\" >> $GITHUB_OUTPUT\n          else\n            echo \"has-tests=false\" >> $GITHUB_OUTPUT\n          fi\n\n      - name: Run tests\n        if: steps.check-tests.outputs.has-tests == 'true'\n        working-directory: src/${{ matrix.package }}\n        run: uv run pytest\n\n  build:\n    needs: [detect-packages, test]\n    strategy:\n      matrix:\n        package: ${{ fromJson(needs.detect-packages.outputs.packages) }}\n    name: Build ${{ matrix.package }}\n    runs-on: ubuntu-latest\n    steps:\n 
     - uses: actions/checkout@v6\n\n      - name: Install uv\n        uses: astral-sh/setup-uv@v3\n\n      - name: Set up Python\n        uses: actions/setup-python@v6\n        with:\n          python-version-file: \"src/${{ matrix.package }}/.python-version\"\n\n      - name: Install dependencies\n        working-directory: src/${{ matrix.package }}\n        run: uv sync --locked --all-extras --dev\n\n      - name: Run pyright\n        working-directory: src/${{ matrix.package }}\n        run: uv run --frozen pyright\n\n      - name: Build package\n        working-directory: src/${{ matrix.package }}\n        run: uv build\n\n      - name: Upload artifacts\n        uses: actions/upload-artifact@v6\n        with:\n          name: dist-${{ matrix.package }}\n          path: src/${{ matrix.package }}/dist/\n\n  publish:\n    runs-on: ubuntu-latest\n    needs: [build, detect-packages]\n    if: github.event_name == 'release'\n\n    strategy:\n      matrix:\n        package: ${{ fromJson(needs.detect-packages.outputs.packages) }}\n    name: Publish ${{ matrix.package }}\n\n    environment: release\n    permissions:\n      id-token: write # Required for trusted publishing\n\n    steps:\n      - name: Download artifacts\n        uses: actions/download-artifact@v7\n        with:\n          name: dist-${{ matrix.package }}\n          path: dist/\n\n      - name: Publish package to PyPI\n        uses: pypa/gh-action-pypi-publish@release/v1\n"
  },
  {
    "path": ".github/workflows/readme-pr-check.yml",
    "content": "name: README PR Check\n\non:\n  pull_request:\n    types: [opened]\n    paths:\n      - 'README.md'\n  issue_comment:\n    types: [created]\n\njobs:\n  check-readme-only:\n    if: github.event_name == 'pull_request'\n    runs-on: ubuntu-latest\n    permissions:\n      contents: read\n      pull-requests: write\n    steps:\n      - name: Check files and comment if README-only\n        uses: actions/github-script@v8\n        with:\n          script: |\n            const { owner, repo } = context.repo;\n            const prNumber = context.payload.pull_request.number;\n\n            const { data: files } = await github.rest.pulls.listFiles({ owner, repo, pull_number: prNumber });\n\n            if (files.length !== 1 || files[0].filename !== 'README.md') {\n              console.log('PR modifies files other than README, skipping');\n              return;\n            }\n\n            // Check if we've already commented\n            const { data: comments } = await github.rest.issues.listComments({ owner, repo, issue_number: prNumber });\n            if (comments.some(c => c.user.login === 'github-actions[bot]' && c.body.includes('no longer accepting PRs to add new servers'))) {\n              console.log('Already commented on this PR, skipping');\n              return;\n            }\n\n            await github.rest.issues.addLabels({ owner, repo, issue_number: prNumber, labels: ['readme: pending'] });\n\n            await github.rest.issues.createComment({\n              owner,\n              repo,\n              issue_number: prNumber,\n              body: [\n                'Thanks for your contribution!',\n                '',\n                '**We are no longer accepting PRs to add new servers to the README.** The server lists are deprecated and will eventually be removed entirely, replaced by the registry.',\n                '',\n                '👉 **To add a new MCP server:** Please publish it to the [MCP Server 
Registry](https://github.com/modelcontextprotocol/registry) instead. You can browse published servers at [registry.modelcontextprotocol.io](https://registry.modelcontextprotocol.io/).',\n                '',\n                '👉 **If this PR updates or removes an existing entry:** We do still accept these changes. Please reply with `/i-promise-this-is-not-a-new-server` to continue.',\n                '',\n                'If this PR is adding a new server, please close it and submit to the registry instead.',\n              ].join('\\n'),\n            });\n\n  handle-confirmation:\n    if: github.event_name == 'issue_comment' && github.event.issue.pull_request && contains(github.event.comment.body, '/i-promise-this-is-not-a-new-server')\n    runs-on: ubuntu-latest\n    permissions:\n      pull-requests: write\n    steps:\n      - name: Swap labels and minimize comments\n        uses: actions/github-script@v8\n        with:\n          script: |\n            const { owner, repo } = context.repo;\n            const prNumber = context.payload.issue.number;\n\n            // Check if pending label exists\n            const { data: labels } = await github.rest.issues.listLabelsOnIssue({ owner, repo, issue_number: prNumber });\n            if (!labels.some(l => l.name === 'readme: pending')) {\n              console.log('No pending label found, skipping');\n              return;\n            }\n\n            // Swap labels\n            try {\n              await github.rest.issues.removeLabel({ owner, repo, issue_number: prNumber, name: 'readme: pending' });\n            } catch (e) {}\n            await github.rest.issues.addLabels({ owner, repo, issue_number: prNumber, labels: ['readme: ready for review'] });\n\n            // Find the bot's original comment\n            const { data: comments } = await github.rest.issues.listComments({ owner, repo, issue_number: prNumber });\n            const botComment = comments.find(c =>\n              c.user.login === 
'github-actions[bot]' &&\n              c.body.includes('no longer accepting PRs to add new servers')\n            );\n\n            // Minimize both comments via GraphQL\n            const minimizeComment = async (nodeId) => {\n              await github.graphql(`\n                mutation($id: ID!) {\n                  minimizeComment(input: {subjectId: $id, classifier: RESOLVED}) {\n                    minimizedComment { isMinimized }\n                  }\n                }\n              `, { id: nodeId });\n            };\n\n            if (botComment) {\n              await minimizeComment(botComment.node_id);\n            }\n\n            // Only minimize user's comment if it's just the command\n            const userComment = context.payload.comment.body.trim();\n            if (userComment === '/i-promise-this-is-not-a-new-server') {\n              await minimizeComment(context.payload.comment.node_id);\n            }\n"
  },
  {
    "path": ".github/workflows/release.yml",
    "content": "name: Automatic Release Creation\n\non:\n  workflow_dispatch:\n  schedule:\n    - cron: '0 10 * * *'\n\njobs:\n  create-metadata:\n    runs-on: ubuntu-latest\n    if: github.repository_owner == 'modelcontextprotocol'\n    outputs:\n      hash: ${{ steps.last-release.outputs.hash }}\n      version: ${{ steps.create-version.outputs.version}}\n      npm_packages: ${{ steps.create-npm-packages.outputs.npm_packages}}\n      pypi_packages: ${{ steps.create-pypi-packages.outputs.pypi_packages}}\n    steps:\n      - uses: actions/checkout@v6\n        with:\n          fetch-depth: 0\n\n      - name: Get last release hash\n        id: last-release\n        run: |\n          HASH=$(git rev-list --tags --max-count=1 || echo \"HEAD~1\")\n          echo \"hash=${HASH}\" >> $GITHUB_OUTPUT\n          echo \"Using last release hash: ${HASH}\"\n\n      - name: Install uv\n        uses: astral-sh/setup-uv@v5\n\n      - name: Create version name\n        id: create-version\n        run: |\n          VERSION=$(uv run --script scripts/release.py generate-version)\n          echo \"version $VERSION\"\n          echo \"version=$VERSION\" >> $GITHUB_OUTPUT\n\n      - name: Create notes\n        run: |\n          HASH=\"${{ steps.last-release.outputs.hash }}\"\n          uv run --script scripts/release.py generate-notes --directory src/ $HASH > RELEASE_NOTES.md\n          cat RELEASE_NOTES.md\n\n      - name: Release notes\n        uses: actions/upload-artifact@v6\n        with:\n          name: release-notes\n          path: RELEASE_NOTES.md\n\n      - name: Create python matrix\n        id: create-pypi-packages\n        run: |\n          HASH=\"${{ steps.last-release.outputs.hash }}\"\n          PYPI=$(uv run --script scripts/release.py generate-matrix --pypi --directory src $HASH)\n          echo \"pypi_packages $PYPI\"\n          echo \"pypi_packages=$PYPI\" >> $GITHUB_OUTPUT\n\n      - name: Create npm matrix\n        id: create-npm-packages\n        run: |\n          
HASH=\"${{ steps.last-release.outputs.hash }}\"\n          NPM=$(uv run --script scripts/release.py generate-matrix --npm --directory src $HASH)\n          echo \"npm_packages $NPM\"\n          echo \"npm_packages=$NPM\" >> $GITHUB_OUTPUT\n\n  update-packages:\n    needs: [create-metadata]\n    if: ${{ needs.create-metadata.outputs.npm_packages != '[]' || needs.create-metadata.outputs.pypi_packages != '[]' }}\n    runs-on: ubuntu-latest\n    environment: release\n    permissions:\n      contents: write\n    outputs:\n      changes_made: ${{ steps.commit.outputs.changes_made }}\n    steps:\n      - uses: actions/checkout@v6\n        with:\n          fetch-depth: 0\n\n      - name: Install uv\n        uses: astral-sh/setup-uv@v5\n\n      - name: Update packages\n        run: |\n          HASH=\"${{ needs.create-metadata.outputs.hash }}\"\n          uv run --script scripts/release.py update-packages --directory src/ $HASH\n\n      - name: Configure git\n        run: |\n          git config --global user.name \"GitHub Actions\"\n          git config --global user.email \"actions@github.com\"\n\n      - name: Commit changes\n        id: commit\n        run: |\n          VERSION=\"${{ needs.create-metadata.outputs.version }}\"\n          git add -u\n          if git diff-index --quiet HEAD; then\n            echo \"changes_made=false\" >> $GITHUB_OUTPUT\n          else\n            git commit -m 'Automatic update of packages'\n            git tag -a \"$VERSION\" -m \"Release $VERSION\"\n            git push origin \"$VERSION\"\n            echo \"changes_made=true\" >> $GITHUB_OUTPUT\n          fi\n\n  publish-pypi:\n    needs: [update-packages, create-metadata]\n    if: ${{ needs.create-metadata.outputs.pypi_packages != '[]' && needs.create-metadata.outputs.pypi_packages != '' }}\n    strategy:\n      fail-fast: false\n      matrix:\n        package: ${{ fromJson(needs.create-metadata.outputs.pypi_packages) }}\n    name: Build ${{ matrix.package }}\n    environment: 
release\n    permissions:\n      id-token: write # Required for trusted publishing\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v6\n        with:\n          ref: ${{ needs.create-metadata.outputs.version }}\n\n      - name: Install uv\n        uses: astral-sh/setup-uv@v5\n\n      - name: Set up Python\n        uses: actions/setup-python@v6\n        with:\n          python-version-file: \"src/${{ matrix.package }}/.python-version\"\n\n      - name: Install dependencies\n        working-directory: src/${{ matrix.package }}\n        run: uv sync --frozen --all-extras --dev\n\n      - name: Run pyright\n        working-directory: src/${{ matrix.package }}\n        run: uv run --frozen pyright\n\n      - name: Build package\n        working-directory: src/${{ matrix.package }}\n        run: uv build\n\n      - name: Publish package to PyPI\n        uses: pypa/gh-action-pypi-publish@release/v1\n        with:\n          packages-dir: src/${{ matrix.package }}/dist\n\n  publish-npm:\n    needs: [update-packages, create-metadata]\n    if: ${{ needs.create-metadata.outputs.npm_packages != '[]' && needs.create-metadata.outputs.npm_packages != '' }}\n    strategy:\n      fail-fast: false\n      matrix:\n        package: ${{ fromJson(needs.create-metadata.outputs.npm_packages) }}\n    name: Build ${{ matrix.package }}\n    environment: release\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v6\n        with:\n          ref: ${{ needs.create-metadata.outputs.version }}\n\n      - uses: actions/setup-node@v6\n        with:\n          node-version: 22\n          cache: npm\n          registry-url: 'https://registry.npmjs.org'\n\n      - name: Install dependencies\n        working-directory: src/${{ matrix.package }}\n        run: npm ci\n\n      - name: Check if version exists on npm\n        working-directory: src/${{ matrix.package }}\n        run: |\n          VERSION=$(jq -r .version package.json)\n          if npm view --json 
| jq -e --arg version \"$VERSION\" '[.[]][0].versions | contains([$version])'; then\n            echo \"Version $VERSION already exists on npm\"\n            exit 1\n          fi\n          echo \"Version $VERSION is new, proceeding with publish\"\n\n      - name: Build package\n        working-directory: src/${{ matrix.package }}\n        run: npm run build\n\n      - name: Publish package\n        working-directory: src/${{ matrix.package }}\n        run: |\n          npm publish --access public\n        env:\n          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}\n\n  create-release:\n    needs: [update-packages, create-metadata, publish-pypi, publish-npm]\n    if: |\n      always() &&\n      needs.update-packages.outputs.changes_made == 'true' &&\n      (needs.publish-pypi.result == 'success' || needs.publish-npm.result == 'success')\n    runs-on: ubuntu-latest\n    environment: release\n    permissions:\n      contents: write\n    steps:\n      - uses: actions/checkout@v6\n\n      - name: Download release notes\n        uses: actions/download-artifact@v7\n        with:\n          name: release-notes\n\n      - name: Create release\n        env:\n          GH_TOKEN: ${{ secrets.GITHUB_TOKEN}}\n        run: |\n          VERSION=\"${{ needs.create-metadata.outputs.version }}\"\n          gh release create \"$VERSION\" \\\n            --title \"Release $VERSION\" \\\n            --notes-file RELEASE_NOTES.md\n\n"
  },
  {
    "path": ".github/workflows/typescript.yml",
    "content": "name: TypeScript\n\non:\n  push:\n    branches:\n      - main\n  pull_request:\n  release:\n    types: [published]\n\njobs:\n  detect-packages:\n    runs-on: ubuntu-latest\n    outputs:\n      packages: ${{ steps.find-packages.outputs.packages }}\n    steps:\n      - uses: actions/checkout@v6\n      - name: Find JS packages\n        id: find-packages\n        working-directory: src\n        run: |\n          PACKAGES=$(find . -name package.json -not -path \"*/node_modules/*\" -exec dirname {} \\; | sed 's/^\\.\\///' | jq -R -s -c 'split(\"\\n\")[:-1]')\n          echo \"packages=$PACKAGES\" >> $GITHUB_OUTPUT\n\n  test:\n    needs: [detect-packages]\n    strategy:\n      matrix:\n        package: ${{ fromJson(needs.detect-packages.outputs.packages) }}\n    name: Test ${{ matrix.package }}\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v6\n\n      - uses: actions/setup-node@v6\n        with:\n          node-version: 22\n          cache: npm\n\n      - name: Install dependencies\n        working-directory: src/${{ matrix.package }}\n        run: npm ci\n\n      - name: Run tests\n        working-directory: src/${{ matrix.package }}\n        run: npm test --if-present\n\n  build:\n    needs: [detect-packages, test]\n    strategy:\n      matrix:\n        package: ${{ fromJson(needs.detect-packages.outputs.packages) }}\n    name: Build ${{ matrix.package }}\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v6\n\n      - uses: actions/setup-node@v6\n        with:\n          node-version: 22\n          cache: npm\n\n      - name: Install dependencies\n        working-directory: src/${{ matrix.package }}\n        run: npm ci\n\n      - name: Build package\n        working-directory: src/${{ matrix.package }}\n        run: npm run build\n\n  publish:\n    runs-on: ubuntu-latest\n    needs: [build, detect-packages]\n    if: github.event_name == 'release'\n    environment: release\n\n    strategy:\n      matrix:\n  
      package: ${{ fromJson(needs.detect-packages.outputs.packages) }}\n    name: Publish ${{ matrix.package }}\n\n    permissions:\n      contents: read\n      id-token: write\n\n    steps:\n      - uses: actions/checkout@v6\n      - uses: actions/setup-node@v6\n        with:\n          node-version: 22\n          cache: npm\n          registry-url: \"https://registry.npmjs.org\"\n\n      - name: Install dependencies\n        working-directory: src/${{ matrix.package }}\n        run: npm ci\n\n      - name: Publish package\n        working-directory: src/${{ matrix.package }}\n        run: npm publish --access public\n        env:\n          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}\n"
  },
  {
    "path": ".gitignore",
    "content": "# Logs\nlogs\n*.log\nnpm-debug.log*\nyarn-debug.log*\nyarn-error.log*\nlerna-debug.log*\n.pnpm-debug.log*\n\n# Diagnostic reports (https://nodejs.org/api/report.html)\nreport.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json\n\n# Runtime data\npids\n*.pid\n*.seed\n*.pid.lock\n\n# Directory for instrumented libs generated by jscoverage/JSCover\nlib-cov\n\n# Coverage directory used by tools like istanbul\ncoverage\n*.lcov\n\n# nyc test coverage\n.nyc_output\n\n# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)\n.grunt\n\n# Bower dependency directory (https://bower.io/)\nbower_components\n\n# node-waf configuration\n.lock-wscript\n\n# Compiled binary addons (https://nodejs.org/api/addons.html)\nbuild/Release\n\n# Dependency directories\nnode_modules/\njspm_packages/\n\n# Snowpack dependency directory (https://snowpack.dev/)\nweb_modules/\n\n# TypeScript cache\n*.tsbuildinfo\n\n# Optional npm cache directory\n.npm\n\n# Optional eslint cache\n.eslintcache\n\n# Optional stylelint cache\n.stylelintcache\n\n# Microbundle cache\n.rpt2_cache/\n.rts2_cache_cjs/\n.rts2_cache_es/\n.rts2_cache_umd/\n\n# Optional REPL history\n.node_repl_history\n\n# Output of 'npm pack'\n*.tgz\n\n# Yarn Integrity file\n.yarn-integrity\n\n# dotenv environment variable files\n.env\n.env.development.local\n.env.test.local\n.env.production.local\n.env.local\n\n# parcel-bundler cache (https://parceljs.org/)\n.cache\n.parcel-cache\n\n# Next.js build output\n.next\nout\n\n# Nuxt.js build / generate output\n.nuxt\ndist\n\n# Gatsby files\n.cache/\n# Comment in the public line in if your project uses Gatsby and not Next.js\n# https://nextjs.org/blog/next-9-1#public-directory-support\n# public\n\n# vuepress build output\n.vuepress/dist\n\n# vuepress v2.x temp and cache directory\n.temp\n.cache\n\n# Docusaurus cache and generated files\n.docusaurus\n\n# Serverless directories\n.serverless/\n\n# FuseBox cache\n.fusebox/\n\n# DynamoDB Local files\n.dynamodb/\n\n# TernJS port 
file\n.tern-port\n\n# Stores VSCode versions used for testing VSCode extensions\n.vscode-test\n\n# IDEs\n.idea/\n.vscode/\n\n# yarn v2\n.yarn/cache\n.yarn/unplugged\n.yarn/build-state.yml\n.yarn/install-state.gz\n.pnp.*\n\nbuild/\n\ngcp-oauth.keys.json\n.*-server-credentials.json\n\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n*.py,cover\n.hypothesis/\n.pytest_cache/\ncover/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\ndb.sqlite3-journal\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\n.pybuilder/\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# IPython\nprofile_default/\nipython_config.py\n\n# pyenv\n#   For a library or package, you might want to ignore these files since the code is\n#   intended to run in multiple environments; otherwise, check them in:\n# .python-version\n\n# pipenv\n#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.\n#   However, in case of collaboration, if having platform-specific dependencies or dependencies\n#   having no cross-platform support, pipenv may install dependencies that don't work, or not\n#   install all needed dependencies.\n#Pipfile.lock\n\n# poetry\n#   Similar to Pipfile.lock, it is generally recommended to 
include poetry.lock in version control.\n#   This is especially recommended for binary packages to ensure reproducibility, and is more\n#   commonly ignored for libraries.\n#   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control\n#poetry.lock\n\n# pdm\n#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.\n#pdm.lock\n#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it\n#   in version control.\n#   https://pdm.fming.dev/latest/usage/project/#working-with-version-control\n.pdm.toml\n.pdm-python\n.pdm-build/\n\n# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm\n__pypackages__/\n\n# Celery stuff\ncelerybeat-schedule\ncelerybeat.pid\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n\n# Pyre type checker\n.pyre/\n\n# pytype static type analyzer\n.pytype/\n\n# Cython debug symbols\ncython_debug/\n\n.DS_Store\n\n# PyCharm\n#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can\n#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore\n#  and can be added to the global gitignore or merged into this file.  For a more nuclear\n#  option (not recommended) you can uncomment the following to ignore the entire idea folder.\n#.idea/\n.claude/settings.local.json\n"
  },
  {
    "path": ".mcp.json",
    "content": "{\n  \"mcpServers\": {\n    \"mcp-docs\": {\n      \"type\": \"http\",\n      \"url\": \"https://modelcontextprotocol.io/mcp\"\n    }\n  }\n}\n"
  },
  {
    "path": ".npmrc",
    "content": "registry=\"https://registry.npmjs.org/\"\n@modelcontextprotocol:registry=\"https://registry.npmjs.org/\"\n"
  },
  {
    "path": "CODE_OF_CONDUCT.md",
    "content": "# Contributor Covenant Code of Conduct\n\n## Our Pledge\n\nWe as members, contributors, and leaders pledge to make participation in our\ncommunity a harassment-free experience for everyone, regardless of age, body\nsize, visible or invisible disability, ethnicity, sex characteristics, gender\nidentity and expression, level of experience, education, socio-economic status,\nnationality, personal appearance, race, religion, or sexual identity\nand orientation.\n\nWe pledge to act and interact in ways that contribute to an open, welcoming,\ndiverse, inclusive, and healthy community.\n\n## Our Standards\n\nExamples of behavior that contributes to a positive environment for our\ncommunity include:\n\n* Demonstrating empathy and kindness toward other people\n* Being respectful of differing opinions, viewpoints, and experiences\n* Giving and gracefully accepting constructive feedback\n* Accepting responsibility and apologizing to those affected by our mistakes,\n  and learning from the experience\n* Focusing on what is best not just for us as individuals, but for the\n  overall community\n\nExamples of unacceptable behavior include:\n\n* The use of sexualized language or imagery, and sexual attention or\n  advances of any kind\n* Trolling, insulting or derogatory comments, and personal or political attacks\n* Public or private harassment\n* Publishing others' private information, such as a physical or email\n  address, without their explicit permission\n* Other conduct which could reasonably be considered inappropriate in a\n  professional setting\n\n## Enforcement Responsibilities\n\nCommunity leaders are responsible for clarifying and enforcing our standards of\nacceptable behavior and will take appropriate and fair corrective action in\nresponse to any behavior that they deem inappropriate, threatening, offensive,\nor harmful.\n\nCommunity leaders have the right and responsibility to remove, edit, or reject\ncomments, commits, code, wiki edits, issues, 
and other contributions that are\nnot aligned to this Code of Conduct, and will communicate reasons for moderation\ndecisions when appropriate.\n\n## Scope\n\nThis Code of Conduct applies within all community spaces, and also applies when\nan individual is officially representing the community in public spaces.\nExamples of representing our community include using an official e-mail address,\nposting via an official social media account, or acting as an appointed\nrepresentative at an online or offline event.\n\n## Enforcement\n\nInstances of abusive, harassing, or otherwise unacceptable behavior may be\nreported to the community leaders responsible for enforcement at\nmcp-coc@anthropic.com.\nAll complaints will be reviewed and investigated promptly and fairly.\n\nAll community leaders are obligated to respect the privacy and security of the\nreporter of any incident.\n\n## Enforcement Guidelines\n\nCommunity leaders will follow these Community Impact Guidelines in determining\nthe consequences for any action they deem in violation of this Code of Conduct:\n\n### 1. Correction\n\n**Community Impact**: Use of inappropriate language or other behavior deemed\nunprofessional or unwelcome in the community.\n\n**Consequence**: A private, written warning from community leaders, providing\nclarity around the nature of the violation and an explanation of why the\nbehavior was inappropriate. A public apology may be requested.\n\n### 2. Warning\n\n**Community Impact**: A violation through a single incident or series\nof actions.\n\n**Consequence**: A warning with consequences for continued behavior. No\ninteraction with the people involved, including unsolicited interaction with\nthose enforcing the Code of Conduct, for a specified period of time. This\nincludes avoiding interactions in community spaces as well as external channels\nlike social media. Violating these terms may lead to a temporary or\npermanent ban.\n\n### 3. 
Temporary Ban\n\n**Community Impact**: A serious violation of community standards, including\nsustained inappropriate behavior.\n\n**Consequence**: A temporary ban from any sort of interaction or public\ncommunication with the community for a specified period of time. No public or\nprivate interaction with the people involved, including unsolicited interaction\nwith those enforcing the Code of Conduct, is allowed during this period.\nViolating these terms may lead to a permanent ban.\n\n### 4. Permanent Ban\n\n**Community Impact**: Demonstrating a pattern of violation of community\nstandards, including sustained inappropriate behavior,  harassment of an\nindividual, or aggression toward or disparagement of classes of individuals.\n\n**Consequence**: A permanent ban from any sort of public interaction within\nthe community.\n\n## Attribution\n\nThis Code of Conduct is adapted from the [Contributor Covenant][homepage],\nversion 2.0, available at\nhttps://www.contributor-covenant.org/version/2/0/code_of_conduct.html.\n\nCommunity Impact Guidelines were inspired by [Mozilla's code of conduct\nenforcement ladder](https://github.com/mozilla/diversity).\n\n[homepage]: https://www.contributor-covenant.org\n\nFor answers to common questions about this code of conduct, see the FAQ at\nhttps://www.contributor-covenant.org/faq. Translations are available at\nhttps://www.contributor-covenant.org/translations.\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# Contributing to MCP Servers\n\nThanks for your interest in contributing! Here's how you can help make this repo better.\n\nWe accept changes through [the standard GitHub flow model](https://docs.github.com/en/get-started/using-github/github-flow).\n\n## Server Listings\n\nWe are **no longer accepting PRs** to add server links to the README. Please publish your server to the [MCP Server Registry](https://github.com/modelcontextprotocol/registry) instead. Follow the [quickstart guide](https://github.com/modelcontextprotocol/registry/blob/main/docs/modelcontextprotocol-io/quickstart.mdx).\n\nYou can browse published servers using the simple UI at [https://registry.modelcontextprotocol.io/](https://registry.modelcontextprotocol.io/).\n\n## Server Implementations\n\nWe welcome:\n- **Bug fixes** — Help us squash those pesky bugs.\n- **Usability improvements** — Making servers easier to use for humans and agents.\n- **Enhancements that demonstrate MCP protocol features** — We encourage contributions that help reference servers better illustrate underutilized aspects of the MCP protocol beyond just Tools, such as Resources, Prompts, or Roots. For example, adding Roots support to filesystem-server helps showcase this important but lesser-known feature.\n\nWe're more selective about:\n- **Other new features** — Especially if they're not crucial to the server's core purpose or are highly opinionated. The existing servers are reference servers meant to inspire the community. If you need specific features, we encourage you to build enhanced versions and publish them to the [MCP Server Registry](https://github.com/modelcontextprotocol/registry)! 
We think a diverse ecosystem of servers is beneficial for everyone.\n\nWe don't accept:\n- **New server implementations** — We encourage you to publish them to the [MCP Server Registry](https://github.com/modelcontextprotocol/registry) instead.\n\n## Testing\n\nWhen adding or configuring tests for servers implemented in TypeScript, use **vitest** as the test framework. Vitest provides better ESM support, faster test execution, and a more modern testing experience.\n\n## Documentation\n\nImprovements to existing documentation are welcome - although generally we'd prefer ergonomic improvements over documenting pain points if possible!\n\nWe're more selective about adding wholly new documentation, especially in ways that aren't vendor neutral (e.g. how to run a particular server with a particular client).\n\n## Community\n\n[Learn how the MCP community communicates](https://modelcontextprotocol.io/community/communication).\n\nThank you for helping make MCP servers better for everyone!"
  },
  {
    "path": "LICENSE",
    "content": "The MCP project is undergoing a licensing transition from the MIT License to the Apache License, Version 2.0 (\"Apache-2.0\"). All new code and specification contributions to the project are licensed under Apache-2.0. Documentation contributions (excluding specifications) are licensed under CC-BY-4.0.\n\nContributions for which relicensing consent has been obtained are licensed under Apache-2.0. Contributions made by authors who originally licensed their work under the MIT License and who have not yet granted explicit permission to relicense remain licensed under the MIT License.\n\nNo rights beyond those granted by the applicable original license are conveyed for such contributions.\n\n---\n\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. 
For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. 
For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to the Licensor for inclusion in the Work by the copyright\n      owner or by an individual or Legal Entity authorized to submit on behalf\n      of the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. 
However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n---\n\nMIT License\n\nCopyright (c) 2024-2025 Model Context Protocol a Series of LF Projects, LLC.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n---\n\nCreative Commons Attribution 4.0 International (CC-BY-4.0)\n\nDocumentation in this project (excluding specifications) is licensed under\nCC-BY-4.0. See https://creativecommons.org/licenses/by/4.0/legalcode for\nthe full license text.\n"
  },
  {
    "path": "README.md",
    "content": "# Model Context Protocol servers\n\nThis repository is a collection of *reference implementations* for the [Model Context Protocol](https://modelcontextprotocol.io/) (MCP), as well as references to community-built servers and additional resources.\n\n> [!IMPORTANT]\n> If you are looking for a list of MCP servers, you can browse published servers on [the MCP Registry](https://registry.modelcontextprotocol.io/). The repository served by this README is dedicated to housing just the small number of reference servers maintained by the MCP steering group.\n\n> [!WARNING]\n> The servers in this repository are intended as **reference implementations** to demonstrate MCP features and SDK usage. They are meant to serve as educational examples for developers building their own MCP servers, not as production-ready solutions. Developers should evaluate their own security requirements and implement appropriate safeguards based on their specific threat model and use case.\n\nThe servers in this repository showcase the versatility and extensibility of MCP, demonstrating how it can be used to give Large Language Models (LLMs) secure, controlled access to tools and data sources.\nTypically, each MCP server is implemented with an MCP SDK:\n\n- [C# MCP SDK](https://github.com/modelcontextprotocol/csharp-sdk)\n- [Go MCP SDK](https://github.com/modelcontextprotocol/go-sdk)\n- [Java MCP SDK](https://github.com/modelcontextprotocol/java-sdk)\n- [Kotlin MCP SDK](https://github.com/modelcontextprotocol/kotlin-sdk)\n- [PHP MCP SDK](https://github.com/modelcontextprotocol/php-sdk)\n- [Python MCP SDK](https://github.com/modelcontextprotocol/python-sdk)\n- [Ruby MCP SDK](https://github.com/modelcontextprotocol/ruby-sdk)\n- [Rust MCP SDK](https://github.com/modelcontextprotocol/rust-sdk)\n- [Swift MCP SDK](https://github.com/modelcontextprotocol/swift-sdk)\n- [TypeScript MCP SDK](https://github.com/modelcontextprotocol/typescript-sdk)\n\n## 🌟 Reference Servers\n\nThese servers 
aim to demonstrate MCP features and the official SDKs.\n\n- **[Everything](src/everything)** - Reference / test server with prompts, resources, and tools.\n- **[Fetch](src/fetch)** - Web content fetching and conversion for efficient LLM usage.\n- **[Filesystem](src/filesystem)** - Secure file operations with configurable access controls.\n- **[Git](src/git)** - Tools to read, search, and manipulate Git repositories.\n- **[Memory](src/memory)** - Knowledge graph-based persistent memory system.\n- **[Sequential Thinking](src/sequentialthinking)** - Dynamic and reflective problem-solving through thought sequences.\n- **[Time](src/time)** - Time and timezone conversion capabilities.\n\n### Archived\n\nThe following reference servers are now archived and can be found at [servers-archived](https://github.com/modelcontextprotocol/servers-archived).\n\n- **[AWS KB Retrieval](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/aws-kb-retrieval-server)** - Retrieval from AWS Knowledge Base using Bedrock Agent Runtime.\n- **[Brave Search](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/brave-search)** - Web and local search using Brave's Search API.  
Has been replaced by the [official server](https://github.com/brave/brave-search-mcp-server).\n- **[EverArt](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/everart)** - AI image generation using various models.\n- **[GitHub](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/github)** - Repository management, file operations, and GitHub API integration.\n- **[GitLab](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/gitlab)** - GitLab API, enabling project management.\n- **[Google Drive](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/gdrive)** - File access and search capabilities for Google Drive.\n- **[Google Maps](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/google-maps)** - Location services, directions, and place details.\n- **[PostgreSQL](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/postgres)** - Read-only database access with schema inspection.\n- **[Puppeteer](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/puppeteer)** - Browser automation and web scraping.\n- **[Redis](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/redis)** - Interact with Redis key-value stores.\n- **[Sentry](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/sentry)** - Retrieving and analyzing issues from Sentry.io.\n- **[Slack](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/slack)** - Channel management and messaging capabilities. 
Now maintained by [Zencoder](https://github.com/zencoderai/slack-mcp-server)\n- **[SQLite](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/sqlite)** - Database interaction and business intelligence capabilities.\n\n## 🤝 Third-Party Servers\n\n> [!NOTE]\n> The server lists in this README are no longer maintained and will eventually be removed.\n\n### 🎖️ Official Integrations\n\nOfficial integrations are maintained by companies building production-ready MCP servers for their platforms.\n\n- <img height=\"12\" width=\"12\" src=\"https://www.21st.dev/favicon.ico\" alt=\"21st.dev Logo\" /> **[21st.dev Magic](https://github.com/21st-dev/magic-mcp)** - Create crafted UI components inspired by the best 21st.dev design engineers.\n- <img height=\"12\" width=\"12\" src=\"https://www.2slides.com/images/2slides-red.svg\" alt=\"2slides Logo\" /> **[2slides](https://github.com/2slides/2slides-mcp)** - An MCP server that provides tools to convert content into slides/PPT/presentation or generate slides/PPT/presentation with user intention.\n- <img height=\"12\" width=\"12\" src=\"https://framerusercontent.com/images/LpSK1tSZweomrAHOMAj9Gea96lA.svg\" alt=\"Paragon Logo\" /> **[ActionKit by Paragon](https://github.com/useparagon/paragon-mcp)** - Connect to 130+ SaaS integrations (e.g. 
Slack, Salesforce, Gmail) with Paragon’s [ActionKit](https://www.useparagon.com/actionkit) API.\n- <img height=\"12\" width=\"12\" src=\"https://invoxx-public-bucket.s3.eu-central-1.amazonaws.com/frontend-resources/adfin-logo-small.svg\" alt=\"Adfin Logo\" /> **[Adfin](https://github.com/Adfin-Engineering/mcp-server-adfin)** - The only platform you need to get paid - all payments in one place, invoicing and accounting reconciliations with [Adfin](https://www.adfin.com/).\n- <img height=\"12\" width=\"12\" src=\"https://github.com/AgentOps-AI/agentops/blob/main/docs/favicon.png\" alt=\"AgentOps Logo\" /> **[AgentOps](https://github.com/AgentOps-AI/agentops-mcp)** - Provide observability and tracing for debugging AI agents with [AgentOps](https://www.agentops.ai/) API.\n- <img height=\"12\" width=\"12\" src=\"https://www.agentql.com/favicon/favicon.png\" alt=\"AgentQL Logo\" /> **[AgentQL](https://github.com/tinyfish-io/agentql-mcp)** - Enable AI agents to get structured data from unstructured web with [AgentQL](https://www.agentql.com/).\n- <img height=\"12\" width=\"12\" src=\"https://agentrpc.com/favicon.ico\" alt=\"AgentRPC Logo\" /> **[AgentRPC](https://github.com/agentrpc/agentrpc)** - Connect to any function, any language, across network boundaries using [AgentRPC](https://www.agentrpc.com/).\n- **[Agentset](https://github.com/agentset-ai/mcp-server)** - RAG for your knowledge base connected to [Agentset](https://agentset.ai).\n- <img height=\"12\" width=\"12\" src=\"https://www.airwallex.com/favicon.ico\" alt=\"Airwallex Logo\" /> **[Airwallex Developer](https://www.npmjs.com/package/@airwallex/developer-mcp)** - Empowers AI coding agents with the tools they need to assist developers integrating with [Airwallex APIs](https://www.airwallex.com/docs/api/)\n- <img height=\"12\" width=\"12\" src=\"https://aiven.io/favicon.ico\" alt=\"Aiven Logo\" /> **[Aiven](https://github.com/Aiven-Open/mcp-aiven)** - Navigate your [Aiven 
projects](https://go.aiven.io/mcp-server) and interact with the PostgreSQL®, Apache Kafka®, ClickHouse® and OpenSearch® services\n- <img height=\"12\" width=\"12\" src=\"https://www.alation.com/resource-center/download/7p3vnbbznfiw/34FMtBTex5ppvs2hNYa9Fc/c877c37e88e5339878658697c46d2d58/Alation-Logo-Bug-Primary.svg\" alt=\"Alation Logo\" /> **[Alation](https://github.com/Alation/alation-ai-agent-sdk)** - Unlock the power of the enterprise Data Catalog by harnessing tools provided by the Alation MCP server.\n- <img height=\"12\" width=\"12\" src=\"https://i.postimg.cc/5NYw9qjS/alby-icon-head-yellow-500x500.png\" alt=\"Alby Logo\" /> **[Alby Bitcoin Payments](https://github.com/getAlby/mcp)** - Connect any bitcoin lightning wallet to your agent to send and receive instant payments globally with your agent.\n- **[Algolia](https://github.com/algolia/mcp)** - Use AI agents to provision, configure, and query your [Algolia](https://algolia.com) search indices.\n- <img height=\"12\" width=\"12\" src=\"https://img.alicdn.com/imgextra/i4/O1CN01epkXwH1WLAXkZfV6N_!!6000000002771-2-tps-200-200.png\" alt=\"Alibaba Cloud AnalyticDB for MySQL Logo\" /> **[Alibaba Cloud AnalyticDB for MySQL](https://github.com/aliyun/alibabacloud-adb-mysql-mcp-server)** - Connect to an [AnalyticDB for MySQL](https://www.alibabacloud.com/en/product/analyticdb-for-mysql) cluster for getting database or table metadata, querying and analyzing data. 
It will be supported to add the OpenAPI for cluster operation in the future.\n- <img height=\"12\" width=\"12\" src=\"https://github.com/aliyun/alibabacloud-adbpg-mcp-server/blob/master/images/AnalyticDB.png\" alt=\"Alibaba Cloud AnalyticDB for PostgreSQL Logo\" /> **[Alibaba Cloud AnalyticDB for PostgreSQL](https://github.com/aliyun/alibabacloud-adbpg-mcp-server)** - An MCP server to connect to [AnalyticDB for PostgreSQL](https://github.com/aliyun/alibabacloud-adbpg-mcp-server) instances, query and analyze data.\n- <img height=\"12\" width=\"12\" src=\"https://img.alicdn.com/imgextra/i3/O1CN0101UWWF1UYn3rAe3HU_!!6000000002530-2-tps-32-32.png\" alt=\"DataWorks Logo\" /> **[Alibaba Cloud DataWorks](https://github.com/aliyun/alibabacloud-dataworks-mcp-server)** - A Model Context Protocol (MCP) server that provides tools for AI, allowing it to interact with the [DataWorks](https://www.alibabacloud.com/help/en/dataworks/) Open API through a standardized interface. This implementation is based on the Alibaba Cloud Open API and enables AI agents to perform cloud resources operations seamlessly.\n- <img height=\"12\" width=\"12\" src=\"https://opensearch-shanghai.oss-cn-shanghai.aliyuncs.com/ouhuang/aliyun-icon.png\" alt=\"Alibaba Cloud OpenSearch Logo\" /> **[Alibaba Cloud OpenSearch](https://github.com/aliyun/alibabacloud-opensearch-mcp-server)** - This MCP server equips AI Agents with tools to interact with [OpenSearch](https://help.aliyun.com/zh/open-search/?spm=5176.7946605.J_5253785160.6.28098651AaYZXC) through a standardized and extensible interface.\n- <img height=\"12\" width=\"12\" src=\"https://github.com/aliyun/alibaba-cloud-ops-mcp-server/blob/master/image/alibaba-cloud.png\" alt=\"Alibaba Cloud OPS Logo\" /> **[Alibaba Cloud OPS](https://github.com/aliyun/alibaba-cloud-ops-mcp-server)** - Manage the lifecycle of your Alibaba Cloud resources with [CloudOps Orchestration Service](https://www.alibabacloud.com/en/product/oos) and Alibaba Cloud OpenAPI.\n- <img 
height=\"12\" width=\"12\" src=\"https://github.com/aliyun/alibabacloud-rds-openapi-mcp-server/blob/main/assets/alibabacloudrds.png\" alt=\"Alibaba Cloud RDS MySQL Logo\" /> **[Alibaba Cloud RDS](https://github.com/aliyun/alibabacloud-rds-openapi-mcp-server)** - An MCP server designed to interact with the Alibaba Cloud RDS OpenAPI, enabling programmatic management of RDS resources via an LLM.\n- <img height=\"12\" width=\"12\" src=\"https://www.alipayplus.com/favicon.ico\" alt=\"AlipayPlus Logo\" /> **[AlipayPlus](https://github.com/alipay/global-alipayplus-mcp)** - Connect your AI Agents to AlipayPlus Checkout Payment.\n- <img height=\"12\" width=\"12\" src=\"https://datalab.alkemi.ai/favicon.png\" alt=\"Alkemi Logo\" /> **[Alkemi](https://github.com/alkemi-ai/alkemi-mcp)** - Query Snowflake, Google BigQuery, DataBricks Data Products through Alkemi.ai.\n- <img height=\"12\" width=\"12\" src=\"https://cdn.allvoicelab.com/resources/workbench/dist/icon-dark.ico\" alt=\"AllVoiceLab Logo\" /> **[AllVoiceLab](https://www.allvoicelab.com/mcp)** - An AI voice toolkit with TTS, voice cloning, and video translation, now available as an MCP server for smarter agent integration.\n- <img height=\"12\" width=\"12\" src=\"https://files.alpaca.markets/webassets/favicon-32x32.png\" alt=\"Alpaca Logo\" /> **[Alpaca](https://github.com/alpacahq/alpaca-mcp-server)** – Alpaca's MCP server lets you trade stocks and options, analyze market data, and build strategies through [Alpaca's Trading API](https://alpaca.markets/)\n- <img height=\"12\" width=\"12\" src=\"https://www.alphavantage.co/logo.png/\" alt=\"AlphaVantage Logo\" /> **[AlphaVantage](https://mcp.alphavantage.co/)** - Connect to 100+ APIs for financial market data, including stock prices, fundamentals, and more from [AlphaVantage](https://www.alphavantage.co)\n- <img height=\"12\" width=\"12\" src=\"https://alttester.com/app/themes/alttester-sage-theme/public/images/logo-alttester.038ec8.png\" alt=\"AltTester Logo\" /> 
**[AltTester®](https://alttester.com/docs/desktop/latest/pages/ai-extension.html)** - Use AltTester® capabilities to connect and test your Unity or Unreal game. Write game test automation faster and smarter, using [AltTester](https://alttester.com) and the AltTester® MCP server. \n- <img height=\"12\" width=\"12\" src=\"https://raw.githubusercontent.com/amplitude/mcp-server-guide/refs/heads/main/amplitude-logo.svg\" alt=\"Amplitude Logo\" /> **[Amplitude](https://amplitude.com/docs/analytics/amplitude-mcp)** - The Amplitude MCP server enables seamless integration between AI assistants and your product data, allowing you to search, analyze, and query charts, dashboards, experiments, feature flags, and metrics directly from your AI interface.\n- <img height=\"12\" width=\"12\" src=\"https://www.antom.com/favicon.ico\" alt=\"Antom Logo\" /> **[Antom](https://github.com/alipay/global-antom-mcp)** - Connect your AI Agents to Antom Checkout Payment.\n- <img height=\"12\" width=\"12\" src=\"https://developers.anytype.io/img/favicon.ico\" alt=\"Anytype Logo\" /> **[Anytype](https://github.com/anyproto/anytype-mcp)** - An MCP server enabling AI assistants to interact with [Anytype](https://anytype.io) - a local and collaborative wiki - to organize objects, lists, and more through natural language.\n- <img height=\"12\" width=\"12\" src=\"https://doris.apache.org/images/favicon.ico\" alt=\"Apache Doris Logo\" /> **[Apache Doris](https://github.com/apache/doris-mcp-server)** - MCP Server For [Apache Doris](https://doris.apache.org/), an MPP-based real-time data warehouse.\n- <img height=\"12\" width=\"12\" src=\"https://iotdb.apache.org/img/logo.svg\" alt=\"Apache IoTDB Logo\" /> **[Apache IoTDB](https://github.com/apache/iotdb-mcp-server)** - MCP Server for [Apache IoTDB](https://github.com/apache/iotdb) database and its tools\n- **[Apache Pinot](https://github.com/startreedata/mcp-pinot)** – MCP server for running real - time analytics queries on Apache Pinot, an 
open-source OLAP database built for high-throughput, low-latency powering real-time applications.\n- <img height=\"12\" width=\"12\" src=\"https://apify.com/favicon.ico\" alt=\"Apify Logo\" /> **[Apify](https://github.com/apify/apify-mcp-server)** - Use 6,000+ pre-built cloud tools to extract data from websites, e-commerce, social media, search engines, maps, and more\n- <img height=\"12\" width=\"12\" src=\"https://2052727.fs1.hubspotusercontent-na1.net/hubfs/2052727/cropped-cropped-apimaticio-favicon-1-32x32.png\" alt=\"APIMatic Logo\" /> **[APIMatic MCP](https://github.com/apimatic/apimatic-validator-mcp)** - APIMatic MCP Server is used to validate OpenAPI specifications using [APIMatic](https://www.apimatic.io/). The server processes OpenAPI files and returns validation summaries by leveraging APIMatic's API.\n- <img height=\"12\" width=\"12\" src=\"https://apollo-server-landing-page.cdn.apollographql.com/_latest/assets/favicon.png\" alt=\"Apollo Graph Logo\" /> **[Apollo MCP Server](https://github.com/apollographql/apollo-mcp-server/)** - Connect your GraphQL APIs to AI agents\n- <img height=\"12\" width=\"12\" src=\"https://appium.io/docs/en/latest/assets/images/appium-logo-horiz.png\" alt=\"Appium Logo\" /> **[Appium MCP Server](https://github.com/appium/appium-mcp.git)** - MCP server for Mobile Development and Automation | iOS, Android, Simulator, Emulator, and Real Devices \n- <img height=\"12\" width=\"12\" src=\"https://developer.aqara.com/favicon.ico\" alt=\"Aqara Logo\" /> **[Aqara MCP Server](https://github.com/aqara/aqara-mcp-server/)** - Control  [Aqara](https://www.aqara.com/) smart home devices, query status, execute scenes, and much more using natural language.\n- <img height=\"12\" width=\"12\" src=\"https://media.licdn.com/dms/image/v2/C4D0BAQEeD7Dxbpadkw/company-logo_200_200/company-logo_200_200/0/1644692667545/archbee_logo?e=2147483647&v=beta&t=lTi9GRIoqzG6jN3kJC26uZWh0q3uiQelsH6mGoq_Wfw\" alt=\"Archbee Logo\" /> 
**[Archbee](https://www.npmjs.com/package/@archbee/mcp)** - Write and publish documentation that becomes the trusted source for instant answers with AI. Stop cobbling tools and use [Archbee](https://www.archbee.com/) — the first complete documentation platform.\n- <img height=\"12\" width=\"12\" src=\"https://phoenix.arize.com/wp-content/uploads/2023/04/cropped-Favicon-32x32.png\" alt=\"Arize-Phoenix Logo\" /> **[Arize Phoenix](https://github.com/Arize-ai/phoenix/tree/main/js/packages/phoenix-mcp)** - Inspect traces, manage prompts, curate datasets, and run experiments using [Arize Phoenix](https://github.com/Arize-ai/phoenix), an open-source AI and LLM observability tool.\n- <img height=\"12\" width=\"12\" src=\"https://731523176-files.gitbook.io/~/files/v0/b/gitbook-x-prod.appspot.com/o/spaces%2FaVUBXRZbpAgtjYf5HsvO%2Fuploads%2FaRRrVVocXCTr6GkepfCx%2Flogo_color.svg?alt=media&token=3ba24089-0ab2-421f-a9d9-41f2f94f954a\" alt=\"Armor Logo\" /> **[Armor Crypto MCP](https://github.com/armorwallet/armor-crypto-mcp)** - MCP to interface with multiple blockchains, staking, DeFi, swap, bridging, wallet management, DCA, Limit Orders, Coin Lookup, Tracking and more.\n- <img height=\"12\" width=\"12\" src=\"https://console.asgardeo.io/app/libs/themes/wso2is/assets/images/branding/favicon.ico\" alt=\"Asgardeo Logo\" /> **[Asgardeo](https://github.com/asgardeo/asgardeo-mcp-server)** - MCP server to interact with your [Asgardeo](https://wso2.com/asgardeo) organization through LLM tools.\n- <img height=\"12\" width=\"12\" src=\"https://www.datastax.com/favicon-32x32.png\" alt=\"DataStax logo\" /> **[Astra DB](https://github.com/datastax/astra-db-mcp)** - Comprehensive tools for managing collections and documents in a [DataStax Astra DB](https://www.datastax.com/products/datastax-astra) NoSQL database with a full range of operations such as create, update, delete, find, and associated bulk actions.\n- <img height=\"12\" width=\"12\" 
src=\"https://cdn.prod.website-files.com/66598898fd13d51606c3215d/66ccbfef13bd8bc19d587578_favicon-32x32.png\" alt=\"Atla Logo\" /> **[Atla](https://github.com/atla-ai/atla-mcp-server)** - Enable AI agents to interact with the [Atla API](https://docs.atla-ai.com/) for state-of-the-art LLMJ evaluation.\n- <img height=\"12\" width=\"12\" src=\"https://assets.atlan.com/assets/atlan-a-logo-blue-background.png\" alt=\"Atlan Logo\" /> **[Atlan](https://github.com/atlanhq/agent-toolkit/tree/main/modelcontextprotocol)** - The Atlan Model Context Protocol server allows you to interact with the [Atlan](https://www.atlan.com/) services through multiple tools.\n- <img height=\"12\" width=\"12\" src=\"https://www.atlassian.com/favicon.ico\" alt=\"Atlassian Logo\" /> **[Atlassian](https://www.atlassian.com/platform/remote-mcp-server)** - Securely interact with Jira work items and Confluence pages, and search across both.\n- <img height=\"12\" width=\"12\" src=\"https://res.oafimg.cn/-/737b3b3ffed9b19e/logo.png\" alt=\"AtomGit Logo\" /> **[AtomGit](https://atomgit.com/atomgit-open-source-ecosystem/atomgit-mcp-server)** - Official AtomGit server for integration with repository management, PRs, issues, branches, labels, and more.\n- <img height=\"12\" width=\"12\" src=\"https://atono.io/favicon.ico\" alt=\"Atono Logo\" /> **[Atono](https://docs.atono.io/docs/mcp-server-for-atono/)** - Modern product teams connect their AI assistant to Atono to create and update stories, bugs, assignments and fixes.\n- <img height=\"12\" width=\"12\" src=\"https://resources.audiense.com/hubfs/favicon-1.png\" alt=\"Audiense Logo\" /> **[Audiense Insights](https://github.com/AudienseCo/mcp-audiense-insights)** - Marketing insights and audience analysis from [Audiense](https://www.audiense.com/products/audiense-insights) reports, covering demographic, cultural, influencer, and content engagement analysis.\n- <img height=\"12\" width=\"12\" 
src=\"https://cdn.auth0.com/website/website/favicons/auth0-favicon.svg\" alt=\"Auth0 Logo\" /> **[Auth0](https://github.com/auth0/auth0-mcp-server)** - MCP server for interacting with your Auth0 tenant, supporting creating and modifying actions, applications, forms, logs, resource servers, and more.\n- <img height=\"12\" width=\"12\" src=\"https://firstorder.ai/favicon_auth.ico\" alt=\"Authenticator App Logo\" /> **[Authenticator App · 2FA](https://github.com/firstorderai/authenticator_mcp)** - A secure MCP (Model Context Protocol) server that enables AI agents to interact with the Authenticator App.\n- <img height=\"12\" width=\"12\" src=\"https://a0.awsstatic.com/libra-css/images/site/fav/favicon.ico\" alt=\"AWS Logo\" /> **[AWS](https://github.com/awslabs/mcp)** -  Specialized MCP servers that bring AWS best practices directly to your development workflow.\n- <img height=\"12\" width=\"12\" src=\"https://axiom.co/favicon.ico\" alt=\"Axiom Logo\" /> **[Axiom](https://github.com/axiomhq/mcp-server-axiom)** - Query and analyze your Axiom logs, traces, and all other event data in natural language\n- <img height=\"12\" width=\"12\" src=\"https://cdn-dynmedia-1.microsoft.com/is/content/microsoftcorp/acom_social_icon_azure\" alt=\"Microsoft Azure Logo\" /> **[Azure](https://github.com/microsoft/mcp/tree/main/servers/Azure.Mcp.Server)** - The Azure MCP Server gives MCP Clients access to key Azure services and tools like Azure Storage, Cosmos DB, the Azure CLI, and more.\n- <img height=\"12\" width=\"12\" src=\"https://cdn-dynmedia-1.microsoft.com/is/content/microsoftcorp/1062064-Products-1.2-24x24\" alt=\"Microsoft Azure DevOps Logo\" /> **[Azure DevOps](https://github.com/microsoft/azure-devops-mcp)** - Interact with Azure DevOps services like repositories, work items, builds, releases, test plans, and code search.\n- <img height=\"12\" width=\"12\" src=\"https://application.backdocket.com/favicon.ico\" alt=\"Backdocket Logo\" /> 
**[Backdocket](https://ai.backdocket.com)** - Search, Retrieve, and Update your **[Backdocket](https://backdocket.com)** data. This currently includes Claims, Matters, Contacts, Tasks and Advanced Searches. To easily use the Remote MCP Server utilize the following url: **[https://ai.backdocket.com/mcp](https://ai.backdocket.com/mcp)**\n- <img height=\"12\" width=\"12\" src=\"https://mapopen-website-wiki.cdn.bcebos.com/LOGO/lbsyunlogo_icon.ico\" alt=\"Baidu Map Logo\" /> **[Baidu Map](https://github.com/baidu-maps/mcp)** - [Baidu Map MCP Server](https://lbsyun.baidu.com/faq/api?title=mcpserver/base) provides tools for AI agents to interact with Baidu Maps APIs, enabling location-based services and geospatial data analysis.\n- <img height=\"12\" width=\"12\" src=\"https://www.bankless.com/favicon.ico\" alt=\"Bankless Logo\" /> **[Bankless Onchain](https://github.com/bankless/onchain-mcp)** - Query Onchain data, like ERC20 tokens, transaction history, smart contract state.\n- <img height=\"12\" width=\"12\" src=\"https://baserow.io/img/logo_baserow_square_large.png\" alt=\"Baserow Logo\" /> **[Baserow](https://gitlab.com/baserow/baserow/-/tree/develop/backend/src/baserow/api/mcp)** - Query data from Baserow self-hosted or SaaS databases using MCP integration.\n- <img height=\"12\" width=\"12\" src=\"https://cdn.prod.website-files.com/6815c48ebd95a588d14e383b/68582c01f6420d9777922095_xAsset%20114rt.avif\" alt=\"Bauplan Logo\" /> **[Bauplan](https://github.com/BauplanLabs/bauplan-mcp-server)** - Manage the Bauplan lakehouse: query tables, create data branches, run pipelines, retrieve logs.\n- <img height=\"12\" width=\"12\" src=\"https://bicscan.io/favicon.png\" alt=\"BICScan Logo\" /> **[BICScan](https://github.com/ahnlabio/bicscan-mcp)** - Risk score / asset holdings of EVM blockchain address (EOA, CA, ENS) and even domain names.\n- <img height=\"12\" width=\"12\" src=\"https://www.bitnovo.com/favicons/favicon-196x196.png\" alt=\"Bitnovo 
Logo\" /> **[Bitnovo Pay](https://github.com/bitnovo/mcp-bitnovo-pay)** - Cryptocurrency payment integration enabling AI agents to create payments, manage QR codes, and process transactions through the Bitnovo Pay API with support for Bitcoin, Ethereum, and other cryptocurrencies.\n- <img height=\"12\" width=\"12\" src=\"https://web-cdn.bitrise.io/favicon.ico\" alt=\"Bitrise Logo\" /> **[Bitrise](https://github.com/bitrise-io/bitrise-mcp)** - Chat with your builds, CI, and [more](https://bitrise.io/blog/post/chat-with-your-builds-ci-and-more-introducing-the-bitrise-mcp-server).\n- <img height=\"12\" width=\"12\" src=\"https://boikot.xyz/assets/favicon.svg\" alt=\"boikot Logo\" /> **[Boikot](https://github.com/boikot-xyz/boikot)** - Learn about the ethical and unethical actions of major companies with [boikot.xyz](https://boikot.xyz/).\n- <img height=\"12\" width=\"12\" src=\"https://boldsign.com/favicon.ico\" alt=\"BoldSign Logo\" /> **[BoldSign](https://github.com/boldsign/boldsign-mcp)** - Search, request, and manage e-signature contracts effortlessly with [BoldSign](https://boldsign.com/).\n- <img height=\"12\" width=\"12\" src=\"https://boost.space/favicon.ico\" alt=\"Boost.space Logo\" /> **[Boost.space](https://github.com/boostspace/boostspace-mcp-server)** - An MCP server integrating with [Boost.space](https://boost.space) for centralized, automated business data from 2000+ sources.\n- <img height=\"12\" width=\"12\" src=\"https://boostsecurity.io/hs-fs/hubfs/blue-logo.png\" alt=\"BoostSecurity Logo\" /> **[BoostSecurity](https://github.com/boost-community/boost-mcp)** - Powered by [BoostSecurity](https://boostsecurity.io/), the MCP guardrails coding agents against introducing dependencies with vulnerabilities, malware or typosquatting.\n- <img height=\"12\" width=\"12\" src=\"https://www.box.com/favicon.ico\" alt=\"Box Logo\" /> **[Box](https://github.com/box-community/mcp-server-box)** - Interact with the Intelligent Content Management platform through Box 
AI.\n- <img height=\"12\" width=\"12\" src=\"https://www.brightdata.com/favicon.ico\" alt=\"BrightData Logo\" /> **[BrightData](https://github.com/luminati-io/brightdata-mcp)** - Discover, extract, and interact with the web - one interface powering automated access across the public internet.\n- <img height=\"12\" width=\"12\" src=\"https://browserbase.com/favicon.ico\" alt=\"Browserbase Logo\" /> **[Browserbase](https://github.com/browserbase/mcp-server-browserbase)** - Automate browser interactions in the cloud (e.g. web navigation, data extraction, form filling, and more)\n- <img height=\"12\" width=\"12\" src=\"https://browserstack.wpenginepowered.com/wp-content/themes/browserstack/img/favicons/favicon.ico\" alt=\"BrowserStack Logo\" /> **[BrowserStack](https://github.com/browserstack/mcp-server)** - Access BrowserStack's [Test Platform](https://www.browserstack.com/test-platform) to debug, write and fix tests, do accessibility testing and more.\n- <img height=\"12\" width=\"12\" src=\"https://bldbl.dev/favico.png\" alt=\"Buildable Logo\" />**[Buildable](https://github.com/chunkydotdev/bldbl-mcp)** (TypeScript) - Official MCP server for Buildable AI-powered development platform. 
Enables AI assistants to manage tasks, track progress, get project context, and collaborate with humans on software projects.\n- <img height=\"12\" width=\"12\" src=\"https://www.google.com/s2/favicons?domain=buildkite.com&sz=24\" alt=\"Buildkite Logo\" /> **[Buildkite](https://github.com/buildkite/buildkite-mcp-server)** - Exposing Buildkite data (pipelines, builds, jobs, tests) to AI tooling and editors.\n- <img height=\"12\" width=\"12\" src=\"https://builtwith.com/favicon.ico\" alt=\"BuiltWith Logo\" /> **[BuiltWith](https://github.com/builtwith/mcp)** - Identify the technology stack behind any website.\n- <img height=\"12\" width=\"12\" src=\"https://portswigger.net/favicon.ico\" alt=\"PortSwigger Logo\" /> **[Burp Suite](https://github.com/PortSwigger/mcp-server)** - MCP Server extension allowing AI clients to connect to [Burp Suite](https://portswigger.net)\n- <img src=\"https://app.cal.com/favicon.ico\" alt=\"Cal.com\" width=\"12\" height=\"12\"> **[Cal.com](https://www.npmjs.com/package/@calcom/cal-mcp?activeTab=readme)** - Connect to the Cal.com API to schedule and manage bookings and appointments.\n- <img height=\"12\" width=\"12\" src=\"https://campertunity.com/assets/icon/favicon.ico\" alt=\"Campertunity Logo\" /> **[Campertunity](https://github.com/campertunity/mcp-server)** - Search campgrounds around the world on campertunity, check availability, and provide booking links.\n- <img height=\"12\" width=\"12\" src=\"https://static.canva.com/static/images/favicon.ico\" alt=\"Canva logo\" /> **[Canva](https://www.canva.dev/docs/apps/mcp-server/)** — Provide AI-powered development assistance for [Canva](https://canva.com) apps and integrations.\n- <img height=\"12\" width=\"12\" src=\"https://carbonvoice.app/favicon.ico\" alt=\"Carbon Voice Logo\" /> **[Carbon Voice](https://github.com/PhononX/cv-mcp-server)** - MCP Server that connects AI Agents to [Carbon Voice](https://getcarbon.app). 
Create, manage, and interact with voice messages, conversations, direct messages, folders, voice memos, AI actions and more in [Carbon Voice](https://getcarbon.app).\n- <img height=\"12\" width=\"12\" src=\"https://play.cartesia.ai/icon.png\" alt=\"Cartesia logo\" /> **[Cartesia](https://github.com/cartesia-ai/cartesia-mcp)** - Connect to the [Cartesia](https://cartesia.ai/) voice platform to perform text-to-speech, voice cloning etc.\n- <img height=\"12\" width=\"12\" src=\"https://www.cashfree.com/favicon.ico\" alt=\"Cashfree logo\" /> **[Cashfree](https://github.com/cashfree/cashfree-mcp)** - [Cashfree Payments](https://www.cashfree.com/) official MCP server.\n- **[CB Insights](https://github.com/cbinsights/cbi-mcp-server)** - Use the [CB Insights](https://www.cbinsights.com) MCP Server to connect to [ChatCBI](https://www.cbinsights.com/chatcbi/)\n- <img height=\"12\" width=\"12\" src=\"https://chainaware.ai/assets/brand/chainawareai-logo.svg\" alt=\"ChainAware.ai Logo\" /> **[Behavioural Prediction](https://github.com/ChainAware/behavioral-prediction-mcp)** - AI-powered tools to analyze wallet behaviour prediction, fraud detection, and rug pull prediction powered by [ChainAware.ai](https://www.chainaware.ai).\n- <img height=\"12\" width=\"12\" src=\"https://www.chargebee.com/static/resources/brand/favicon.png\" alt=\"Chargebee Logo\" /> **[Chargebee](https://github.com/chargebee/agentkit/tree/main/modelcontextprotocol)** - MCP Server that connects AI agents to [Chargebee platform](https://www.chargebee.com).\n- <img height=\"12\" width=\"12\" src=\"https://cheqd.io/wp-content/uploads/2023/03/logo_cheqd_favicon.png\" alt=\"Cheqd Logo\" /> **[Cheqd](https://github.com/cheqd/mcp-toolkit)** - Enable AI Agents to be trusted, verified, prevent fraud, protect your reputation, and more through [cheqd's](https://cheqd.io) Trust Registries and Credentials.\n- <img height=\"12\" width=\"12\" src=\"https://cdn.chiki.studio/brand/logo.png\" alt=\"Chiki StudIO Logo\" /> 
**[Chiki StudIO](https://chiki.studio/galimybes/mcp/)** - Create your own configurable MCP servers purely via configuration (no code), with instructions, prompts, and tools support.\n- <img height=\"12\" width=\"12\" src=\"https://trychroma.com/_next/static/media/chroma-logo.ae2d6e4b.svg\" alt=\"Chroma Logo\" /> **[Chroma](https://github.com/chroma-core/chroma-mcp)** - Embeddings, vector search, document storage, and full-text search with the open-source AI application database\n- <img height=\"12\" width=\"12\" src=\"https://www.google.com/chrome/static/images/favicons/favicon-32x32.png\" alt=\"Chrome\" /> **[Chrome DevTools](https://github.com/ChromeDevTools/chrome-devtools-mcp)** - Enable AI coding assistants to debug web pages directly in Chrome, providing runtime insights and debugging capabilities.\n- <img height=\"12\" width=\"12\" src=\"https://www.chronulus.com/favicon/chronulus-logo-blue-on-alpha-square-128x128.ico\" alt=\"Chronulus AI Logo\" /> **[Chronulus AI](https://github.com/ChronulusAI/chronulus-mcp)** - Predict anything with Chronulus AI forecasting and prediction agents.\n- <img height=\"12\" width=\"12\" src=\"https://circleci.com/favicon.ico\" alt=\"CircleCI Logo\" /> **[CircleCI](https://github.com/CircleCI-Public/mcp-server-circleci)** - Enable AI Agents to fix build failures from CircleCI.\n- <img height=\"12\" width=\"12\" src=\"https://assets.zilliz.com/Zilliz_Logo_Mark_White_20230223_041013_86057436cc.png\" alt=\"Claude Context Logo\" /> **[Claude Context](https://github.com/zilliztech/claude-context)** - Bring your codebase as context to Claude Code\n- <img height=\"12\" width=\"12\" src=\"https://cleanupcrew.ai/favicon-light.png\" alt=\"Cleanup Crew logo\" /> **[Cleanup Crew](https://cleanupcrew.ai/install)** - Real-time human support service for non-technical founders using AI coding tools. 
When AI hits a wall, request instant human help directly from your IDE.\n- <img height=\"12\" width=\"12\" src=\"https://clickhouse.com/favicon.ico\" alt=\"ClickHouse Logo\" /> **[ClickHouse](https://github.com/ClickHouse/mcp-clickhouse)** - Query your [ClickHouse](https://clickhouse.com/) database server.\n- <img height=\"12\" width=\"12\" src=\"https://brand.clicksend.com/_ipx/s_794x608/img/clicksend_icon_only.svg\" alt=\"ClickSend Logo\" /> **[ClickSend](https://github.com/ClickSend/clicksend-mcp-server/)** - This is the official ClickSend MCP Server developed by ClickSend team.\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/206176626?s=200&v=4\" alt=\"Clix Logo\" /> **[Clix MCP Server](https://github.com/clix-so/clix-mcp-server)** - Clix MCP Server that enables AI agents to provide real-time, trusted Clix documentation and SDK code examples for seamless integrations.\n- <img height=\"12\" width=\"12\" src=\"https://7463-tcb-advanced-a656fc-1257967285.tcb.qcloud.la/mcp/cloudbase-logo.svg\" alt=\"CloudBase Logo\" /> **[CloudBase](https://github.com/TencentCloudBase/CloudBase-AI-ToolKit)** - One-stop backend services for WeChat Mini-Programs and full-stack apps with serverless cloud functions and databases by [Tencent CloudBase](https://tcb.cloud.tencent.com/)\n- <img height=\"12\" width=\"12\" src=\"https://www.cloudbees.com/favicon.ico\" alt=\"CloudBees Logo\" /> **[CloudBees CI](https://docs.cloudbees.com/docs/cloudbees-ci-mcp-router/latest/)** - Enable AI access to your [CloudBees CI](https://www.cloudbees.com/capabilities/continuous-integration) cluster, the Enterprise-grade Jenkins®-based solution. 
\n- <img height=\"12\" width=\"12\" src=\"https://www.cloudbees.com/favicon.ico\" alt=\"CloudBees Logo\" /> **[CloudBees Unify](https://docs.cloudbees.com/docs/cloudbees-unify-mcp-server/latest/install/mcp-server)** - Enable AI access to your [CloudBees Unify](https://www.cloudbees.com/unify) environment.\n- <img height=\"12\" width=\"12\" src=\"https://www.cloudbet.com/favicon.ico\" alt=\"Cloudbet Logo\" /> **[Cloudbet](https://github.com/cloudbet/sports-mcp-server)** - Structured sports and esports data via Cloudbet API: fixtures, live odds, stake limits, and markets.\n- <img src=\"http://www.google.com/s2/favicons?domain=www.cloudera.com\" alt=\"Cloudera Iceberg\" width=\"12\" height=\"12\"> **[Cloudera Iceberg](https://github.com/cloudera/iceberg-mcp-server)** - enabling AI on the [Open Data Lakehouse](https://www.cloudera.com/products/open-data-lakehouse.html).\n- <img height=\"12\" width=\"12\" src=\"https://cdn.simpleicons.org/cloudflare\" /> **[Cloudflare](https://github.com/cloudflare/mcp-server-cloudflare)** - Deploy, configure & interrogate your resources on the Cloudflare developer platform (e.g. Workers/KV/R2/D1)\n- <img src=\"https://cdn.prod.website-files.com/64d41aab8183c7c3324ddb29/67c0f1e272e51cf3c511c17c_Gyph.svg\" alt=\"Cloudinary\" width=\"12\" height=\"12\"> **[Cloudinary](https://github.com/cloudinary/mcp-servers)** - Exposes Cloudinary's media upload, transformation, AI analysis, management, optimization and delivery as tools usable by AI agents\n- <img height=\"12\" width=\"12\" src=\"https://raw.githubusercontent.com/Cloudsway-AI/smartsearch/refs/heads/main/plugin_cloudsway.ico\" alt=\"Cloudsway Logo\" /> **[Cloudsway SmartSearch](https://github.com/Cloudsway-AI/smartsearch)** - Web search MCP server powered by Cloudsway, supporting keyword search, language, and safety options. 
Returns structured JSON results.\n- <img height=\"12\" width=\"12\" src=\"https://app.codacy.com/static/images/favicon-16x16.png\" alt=\"Codacy Logo\" /> **[Codacy](https://github.com/codacy/codacy-mcp-server/)** - Interact with [Codacy](https://www.codacy.com) API to query code quality issues, vulnerabilities, and coverage insights about your code.\n- <img height=\"12\" width=\"12\" src=\"https://codelogic.com/wp-content/themes/codelogic/assets/img/favicon.png\" alt=\"CodeLogic Logo\" /> **[CodeLogic](https://github.com/CodeLogicIncEngineering/codelogic-mcp-server)** - Interact with [CodeLogic](https://codelogic.com), a Software Intelligence platform that graphs complex code and data architecture dependencies, to boost AI accuracy and insight.\n- <img height=\"12\" width=\"12\" src=\"https://www.coinex.com/_assets/img/brand/svg/day-1.svg\" alt=\"Coinex Logo\" /> **[Coinex](https://github.com/coinexcom/coinex_mcp_server)** - Official [Coinex API](https://docs.coinex.com/api/v2). An MCP Server to interface with the CoinEx cryptocurrency exchange, enabling retrieve of market data, K-line data, order book depth, account balance queries, order placement and more.\n- <img height=\"12\" width=\"12\" src=\"https://www.coingecko.com/favicon.ico\" alt=\"CoinGecko Logo\" /> **[CoinGecko](https://github.com/coingecko/coingecko-typescript/tree/main/packages/mcp-server)** - Official [CoinGecko API](https://www.coingecko.com/en/api) MCP Server for Crypto Price & Market Data, across 200+ Blockchain Networks and 8M+ Tokens.\n- <img height=\"12\" width=\"12\" src=\"https://coinstats.app/favicon.ico\" alt=\"CoinStats Logo\" /> **[CoinStats](https://github.com/CoinStatsHQ/coinstats-mcp)** - MCP Server for the [CoinStats API](https://coinstats.app/api-docs/mcp/connecting). 
Provides access to cryptocurrency market data, portfolio tracking and news.\n- <img height=\"12\" width=\"12\" src=\"https://www.comet.com/favicon.ico\" alt=\"Comet Logo\" /> **[Comet Opik](https://github.com/comet-ml/opik-mcp)** - Query and analyze your [Opik](https://github.com/comet-ml/opik) logs, traces, prompts and all other telemetry data from your LLMs in natural language.\n- <img height=\"12\" width=\"12\" src=\"https://www.commercelayer.io/favicon.ico\" alt=\"Commerce Layer Logo\" /> **[Commerce Layer](https://github.com/commercelayer/mcp-server-metrics)** - Interact with Commerce Layer Metrics API.\n- <img height=\"12\" width=\"12\" src=\"https://platform.composio.dev/favicon.ico\" alt=\"Composio Logo\" /> **[Composio](https://docs.composio.dev/docs/mcp-overview#-getting-started)** – Use [Composio](https://composio.dev) to connect 100+ tools. Zero setup. Auth built-in. Made for agents, works for humans.\n- <img height=\"12\" width=\"12\" src=\"https://cdn.prod.website-files.com/6572bd8c27ee5db3eb91f4b3/6572bd8d27ee5db3eb91f55e_favicon-dashflow-webflow-template.svg\" alt=\"OSS Conductor Logo\" /> <img height=\"12\" width=\"12\" src=\"https://cdn.prod.website-files.com/68c3f472828bb14d0564ad4a/68c3f472828bb14d0564b0ab_Orkes%20Logo%20Symbol.svg\" alt=\"Orkes Conductor Logo\" />**[Conductor](https://github.com/conductor-oss/conductor-mcp)** - Interact with Conductor (OSS and Orkes) REST APIs.\n- <img height=\"12\" width=\"12\" src=\"https://configcat.com/favicon.ico\" alt=\"ConfigCat Logo\" /> **[ConfigCat](https://github.com/configcat/mcp-server)** - Enables AI tools to interact with [ConfigCat](https://configcat.com), a feature flag service for teams. Supports managing ConfigCat feature flags, configs, environments, products and organizations. 
Helps to integrate ConfigCat SDK, implement feature flags and remove zombie (stale) flags.\n- <img height=\"12\" width=\"12\" src=\"https://www.confluent.io/favicon.ico\" alt=\"Confluent Logo\" /> **[Confluent](https://github.com/confluentinc/mcp-confluent)** - Interact with Confluent Kafka and Confluent Cloud REST APIs.\n- <img height=\"12\" width=\"12\" src=\"https://github.com/mattjoyce.png\" alt=\"Construe Logo\" /> **[Construe](https://github.com/mattjoyce/mcp-construe)** - FastMCP server for intelligent Obsidian vault context management with frontmatter filtering, automatic chunking, and secure bidirectional knowledge operations.\n- <img height=\"12\" width=\"12\" src=\"https://ginylil.com/favicon.ico\" alt=\"Ginylil Logo\" /> **[Context Templates](https://github.com/ginylil/context-templates)** - An open-source collection of reusable context templates designed to assist developers in structuring prompts, configurations, and workflows across various development tasks. Community contributions are encouraged to expand and refine available templates.\n- <img src=\"https://contrastsecurity.com/favicon.ico\" alt=\"Contrast Security\" width=\"12\" height=\"12\"> **[Contrast Security](https://github.com/Contrast-Security-OSS/mcp-contrast)** - Brings Contrast's vulnerability and SCA data into your coding agent to quickly remediate vulnerabilities.\n- <img height=\"12\" width=\"12\" src=\"https://www.convex.dev/favicon.ico\" alt=\"Convex Logo\" /> **[Convex](https://stack.convex.dev/convex-mcp-server)** - Introspect and query your apps deployed to Convex.\n- <img height=\"12\" width=\"12\" src=\"https://www.cortex.io/favicon.ico\" alt=\"Cortex Logo\" /> **[Cortex](https://github.com/cortexapps/cortex-mcp)** - Official MCP server for [Cortex](https://www.cortex.io).\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/605755?s=200&v=4\" alt=\"Couchbase Logo\" /> **[Couchbase](https://github.com/Couchbase-Ecosystem/mcp-server-couchbase)** - 
Interact with the data stored in Couchbase clusters.\n- <img height=\"12\" width=\"12\" src=\"https://www.courier.com/favicon.ico\" alt=\"Courier Logo\" /> **[Courier](https://www.courier.com/docs/tools/mcp)** - Build, update, and send multi-channel notifications across email, sms, push, Slack, and Microsoft Teams.\n- <img height=\"12\" width=\"12\" src=\"https://github.com/user-attachments/assets/b256f9fa-2020-4b37-9644-c77229ef182b\" alt=\"CRIC 克而瑞 LOGO\"> **[CRIC Wuye AI](https://github.com/wuye-ai/mcp-server-wuye-ai)** - Interact with capabilities of the CRIC Wuye AI platform, an intelligent assistant specifically for the property management industry.\n- <img height=\"12\" width=\"12\" src=\"https://www.crowdstrike.com/etc.clientlibs/crowdstrike/clientlibs/crowdstrike-common/resources/favicon.ico\" alt=\"CrowdStrike Logo\" /> **[CrowdStrike Falcon](https://github.com/CrowdStrike/falcon-mcp)** - Connects AI agents with the CrowdStrike Falcon platform for intelligent security analysis, providing programmatic access to detections, incidents, behaviors, threat intelligence, hosts, vulnerabilities, and identity protection capabilities.\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/58433296\" alt=\"CTERA Edge Filer\" /> **[CTERA Edge Filer](https://github.com/ctera/mcp-ctera-edge)** - CTERA Edge Filer delivers intelligent edge caching and multiprotocol file access, enabling fast, secure access to files across core and remote sites.\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/58433296\" alt=\"CTERA Portal\" /> **[CTERA Portal](https://github.com/ctera/mcp-ctera-core)** - CTERA Portal is a multi-tenant, multi-cloud platform that delivers a global namespace and unified management across petabytes of distributed content.\n- <img height=\"12\" width=\"12\" src=\"https://customer.io/favicon.ico\" alt=\"Customer.io Logo\" /> **[Customer.io](https://docs.customer.io/ai/mcp-server/)** - Let any LLM work 
directly with your Customer.io workspace to create segments, inspect user profiles, search for customers, and access workspace data. Analyze customer attributes, manage audience targeting, and explore your workspace without switching tabs.\n- <img height=\"12\" width=\"12\" src=\"https://app.cycode.com/img/favicon.ico\" alt=\"Cycode Logo\" /> **[Cycode](https://github.com/cycodehq/cycode-cli#mcp-command-experiment)** - Boost security in your dev lifecycle via SAST, SCA, Secrets & IaC scanning with [Cycode](https://cycode.com/).\n- <img height=\"12\" width=\"12\" src=\"http://app.itsdart.com/static/img/favicon.png\" alt=\"Dart Logo\" /> **[Dart](https://github.com/its-dart/dart-mcp-server)** - Interact with task, doc, and project data in [Dart](https://itsdart.com), an AI-native project management tool\n- <img height=\"12\" width=\"12\" src=\"https://cdn.bfldr.com/9AYANS2F/at/k8bgnnxhb4bggjk88r4x9snf/databricks-symbol-color.svg?auto=webp&format=png&width=12&height=13\" alt=\"Databricks Logo\" /> **[Databricks](https://docs.databricks.com/aws/en/generative-ai/mcp/)** - Connect to data, AI tools & agents, and the rest of the Databricks platform using turnkey managed MCP servers. 
Or, host your own custom MCP servers within the Databricks security and data governance boundary.\n- <img height=\"12\" width=\"12\" src=\"https://datahub.com/wp-content/uploads/2025/04/cropped-Artboard-1-32x32.png\" alt=\"DataHub Logo\" /> **[DataHub](https://github.com/acryldata/mcp-server-datahub)** - Search your data assets, traverse data lineage, write SQL queries, and more using [DataHub](https://datahub.com/) metadata.\n- <img height=\"12\" width=\"12\" src=\"https://www.datawrapper.de/favicon-32x32.png\" alt=\"Datawrapper logo\"> **[Datawrapper](https://github.com/palewire/datawrapper-mcp)** - A Model Context Protocol (MCP) server for creating [Datawrapper](https://datawrapper.de) charts using AI assistants.\n- <img height=\"12\" width=\"12\" src=\"https://www.daytona.io/brand/social-daytona-icon.png\" alt=\"Daytona Logo\" /> **[Daytona](https://github.com/daytonaio/daytona/tree/main/apps/cli/mcp)** - Fast and secure execution of your AI generated code with [Daytona](https://daytona.io) sandboxes\n- <img height=\"12\" width=\"12\" src=\"https://debugg.ai/favicon.svg\" alt=\"Debugg AI Logo\" /> **[Debugg.AI](https://github.com/debugg-ai/debugg-ai-mcp)** - Zero-Config, Fully AI-Managed End-to-End Testing for any code gen platform via [Debugg.AI](https://debugg.ai) remote browsing test agents.\n- <img height=\"12\" width=\"12\" src=\"https://www.deepl.com/img/logo/deepl-logo-blue.svg\" alt=\"DeepL Logo\" /> **[DeepL](https://github.com/DeepLcom/deepl-mcp-server)** - Translate or rewrite text with [DeepL](https://deepl.com)'s very own AI models using [the DeepL API](https://developers.deepl.com/docs)\n- <img height=\"12\" width=\"12\" src=\"https://web-st.oss-cn-shanghai.aliyuncs.com/www/static/icon/bitbug_favicon.ico\" alt=\"DeepQ Logo\"> **[DeepQ](https://github.com/shenqingtech/deepq-financial-toolkit-mcp-server)** - DeepQ Technology's Financial Toolkit MCP Server is a Chinese financial AI toolkit that provides comprehensive financial data and analytical tool 
support for AI large language models.\n- <img height=\"12\" width=\"12\" src=\"https://defang.io/_next/static/media/defang-icon-dark-colour.25f95b77.svg\" alt=\"Defang Logo\" /> **[Defang](https://github.com/DefangLabs/defang/blob/main/src/pkg/mcp/README.md)** - Deploy your project to the cloud seamlessly with the [Defang](https://www.defang.io) platform without leaving your integrated development environment\n- <img height=\"12\" width=\"12\" src=\"https://deployhq.com/assets/favicon-357ebe39b58f28869358da83948e76e7cadfb0791c97af34abfe346f5e3ef634.png\" alt=\"DeployHQ Logo\" /> **[DeployHQ](https://github.com/deployhq/deployhq-mcp-server)** – MCP server for DeployHQ API integration, enabling AI assistants to manage deployments, list projects, and monitor deployment status.\n- <img height=\"12\" width=\"12\" src=\"https://destinia.com/headers/ilusion/sunrise/dist/favicon/favicon-16x16.png?v=PCJysKzN\" alt=\"Destinia Logo\" /> **[Destinia](https://destinia.com/developers)** - Provider tools to search for hotels in Destinia and get listing details.\n- <img height=\"12\" width=\"12\" src=\"https://detailer.ginylil.com/favicon.ico\" alt=\"Detailer Logo\" /> **[Detailer](https://detailer.ginylil.com/)** – Instantly generate rich, AI-powered documentation for your GitHub repositories. 
Designed for AI agents to gain deep project context before taking action.\n- <img height=\"12\" width=\"12\" src=\"https://devcycle.com/_next/image?url=%2Fassets%2Fbrand%2FColor-logo-mark.png&w=384&q=75\" alt=\"DevCycle Logo\" /> **[DevCycle](https://docs.devcycle.com/cli-mcp/mcp-getting-started)** - Create and monitor feature flags using natural language in your AI coding assistant.\n- <img height=\"12\" width=\"12\" src=\"https://www.devexpress.com/Content/Core/favicon.ico\" alt=\"DevExpress Logo\" /> **[DevExpress](https://docs.devexpress.com/GeneralInformation/405551/help-resources/dev-express-documentation-mcp-server-configure-an-ai-powered-assistant)** Documentation MCP server — Get instant, AI-powered access to 300,000+ help topics on [DevExpress](https://www.devexpress.com) UI Component APIs — right in the AI Coding Assistant/IDE of your choice.\n- <img height=\"12\" width=\"12\" src=\"https://www.devhub.com/img/upload/favicon-196x196-dh.png\" alt=\"DevHub Logo\" /> **[DevHub](https://github.com/devhub/devhub-cms-mcp)** - Manage and utilize website content within the [DevHub](https://www.devhub.com) CMS platform\n- <img height=\"12\" width=\"12\" src=\"https://devrev.ai/favicon.ico\" alt=\"DevRev Logo\" /> **[DevRev](https://github.com/devrev/mcp-server)** - An MCP server to integrate with DevRev APIs to search through your DevRev Knowledge Graph where objects can be imported from diff. 
Sources listed [here](https://devrev.ai/docs/import#available-sources).\n- <img height=\"12\" width=\"12\" src=\"https://dexpaprika.com/favicon.ico\" alt=\"DexPaprika Logo\" /> **[DexPaprika (CoinPaprika)](https://github.com/coinpaprika/dexpaprika-mcp)** - Access real-time DEX data, liquidity pools, token information, and trading analytics across multiple blockchain networks with [DexPaprika](https://dexpaprika.com) by CoinPaprika.\n- **[Diffusion](https://github.com/diffusiondata/diffusion-mcp-server)** - Connect to any Diffusion server to explore topics, create/update topics, manage sessions, configure features like topic views and metrics, and monitor the server.\n- <img height=\"12\" width=\"12\" src=\"https://github.com/dolthub/dolt/raw/main/images/Dolt-Logo@3x.svg\" alt=\"Dolt Logo\" /> **[Dolt](https://github.com/dolthub/dolt-mcp)** - The official MCP server for version-controlled [Dolt](https://doltdb.com/) databases.\n- <img height=\"12\" width=\"12\" src=\"https://eu.getdot.ai/favicon.ico\" alt=\"GetDot.ai Logo\" /> **[Dot (GetDot.ai)](https://docs.getdot.ai/dot/integrations/mcp)** - Fetch, analyze or visualize data from your favorite database or data warehouse (Snowflake, BigQuery, Redshift, Databricks, Clickhouse, ...) with [Dot](https://getdot.ai), your AI Data Analyst. 
This remote MCP server is a one-click integration for users that have set up Dot.
[Elasticsearch](https://www.elastic.co/elasticsearch)\n- <img height=\"12\" width=\"12\" src=\"https://www.elastic.co/favicon.ico\" alt=\"Elasticsearch Memory Logo\" /> **[Elasticsearch Memory](https://github.com/fredac100/elasticsearch-memory-mcp)** - Persistent memory with hierarchical categorization, semantic search, and intelligent auto-detection. Install via [PyPI](https://pypi.org/project/elasticsearch-memory-mcp/).\n- <img height=\"12\" width=\"12\" src=\"https://elasticemail.com/favicon.ico\" alt=\"Elastic Email Logo\" /> **[Elastic Email](https://github.com/ElasticEmail/elasticemail-mcp-server)** - Elastic Email MCP Server delivers full-scale email capabilities to the next generation of AI agents and MCP-compatible environments.\n- <img height=\"12\" width=\"12\" src=\"https://github.com/EmberAGI/arbitrum-vibekit/blob/main/img/Ember%20Black.png?raw=true\" alt=\"Ember AI Logo\" /> **[Ember AI](https://docs.emberai.xyz/)** - A unified MCP server that enables AI agents to execute cross-chain DeFi strategies.\n- <img height=\"12\" width=\"12\" src=\"https://cdn.prod.website-files.com/656eaf5c6da3527caf362363/656ecc07555afac40df4c40e_Facicon.png\" alt=\"Endor Labs Logo\" /> **[Endor Labs](https://docs.endorlabs.com/deployment/ide/mcp/)** - Find and fix security risks in you code. 
Integrate [Endor Labs](https://endorlabs.com) to scan and secure your code from vulnerabilities and secret leaks.\n- <img height=\"12\" width=\"12\" src=\"https://esignatures.com/favicon.ico\" alt=\"eSignatures Logo\" /> **[eSignatures](https://github.com/esignaturescom/mcp-server-esignatures)** - Contract and template management for drafting, reviewing, and sending binding contracts.\n- <img height=\"12\" width=\"12\" src=\"https://rainmaker.espressif.com/favicon.ico\" alt=\"ESP RainMaker Logo\" /> **[ESP RainMaker](https://github.com/espressif/esp-rainmaker-mcp)** - Official Espressif MCP Server to Control and Manage ESP RainMaker Devices.\n- <img height=\"12\" width=\"12\" src=\"https://exa.ai/images/favicon-32x32.png\" alt=\"Exa Logo\" /> **[Exa](https://github.com/exa-labs/exa-mcp-server)** - Search Engine made for AIs by [Exa](https://exa.ai)\n- <img height=\"12\" width=\"12\" src=\"https://www.explorium.ai/wp-content/uploads/2025/04/Favicon-Purple-512x512-1-150x150.png\" alt=\"Explorium Logo\" /> **[Explorium](https://github.com/explorium-ai/mcp-explorium)** - B2B data and infrastructure for AI SDR & GTM Agents [Explorium](https://www.explorium.ai)\n- **[FalkorDB](https://github.com/FalkorDB/FalkorDB-MCPServer)** - FalkorDB graph database server get schema and read/write-cypher [FalkorDB](https://www.falkordb.com)\n- <img height=\"12\" width=\"12\" src=\"https://fetchserp.com/icon.png\" alt=\"fetchSERP Logo\" /> **[fetchSERP](https://github.com/fetchSERP/fetchserp-mcp-server-node)** - All-in-One SEO & Web Intelligence Toolkit API [fetchSERP](https://www.fetchserp.com/)\n- <img height=\"12\" width=\"12\" src=\"https://fewsats.com/favicon.svg\" alt=\"Fewsats Logo\" /> **[Fewsats](https://github.com/Fewsats/fewsats-mcp)** - Enable AI Agents to purchase anything in a secure way using [Fewsats](https://fewsats.com)\n- <img height=\"12\" width=\"12\" src=\"https://fibery.io/favicon.svg\" alt=\"Fibery Logo\" /> 
**[Fibery](https://github.com/Fibery-inc/fibery-mcp-server)** - Perform queries and entity operations in your [Fibery](https://fibery.io) workspace.\n- <img height=\"12\" width=\"12\" src=\"https://static.figma.com/app/icon/2/favicon.ico\" alt=\"Figma Logo\" /> **[Figma](https://github.com/figma/mcp-server-guide)** - Bring Figma directly into your workflow by providing important design information and context to AI agents generating code from design files with the official [Figma](https://www.figma.com) MCP server.\n- <img height=\"12\" width=\"12\" src=\"https://financialdatasets.ai/favicon.ico\" alt=\"Financial Datasets Logo\" /> **[Financial Datasets](https://github.com/financial-datasets/mcp-server)** - Stock market API made for AI agents\n- <img height=\"12\" width=\"12\" src=\"https://www.gstatic.com/devrel-devsite/prod/v7aeef7f1393bb1d75a4489145c511cdd5aeaa8e13ad0a83ec1b5b03612e66330/firebase/images/favicon.png\" alt=\"Firebase Logo\" /> **[Firebase](https://github.com/firebase/firebase-tools/blob/master/src/mcp)** - Firebase's experimental [MCP Server](https://firebase.google.com/docs/cli/mcp-server) to power your AI Tools\n- <img height=\"12\" width=\"12\" src=\"https://firecrawl.dev/favicon.ico\" alt=\"Firecrawl Logo\" /> **[Firecrawl](https://github.com/firecrawl/firecrawl-mcp-server)** - Extract web data with [Firecrawl](https://firecrawl.dev)\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/100200663?s=200&v=4\" alt=\"Firefly Logo\" /> **[Firefly](https://github.com/gofireflyio/firefly-mcp)** - Integrates, discovers, manages, and codifies cloud resources with [Firefly](https://firefly.ai).\n- <img height=\"12\" width=\"12\" src=\"https://fireproof.storage/favicon.ico\" alt=\"Fireproof Logo\" /> **[Fireproof](https://github.com/fireproof-storage/mcp-database-server)** - Immutable ledger database with live synchronization\n- <img height=\"12\" width=\"12\" src=\"https://fixparser.dev/favicon.ico\" alt=\"FIXParser Logo\" /> 
**[FIXParser](https://gitlab.com/logotype/fixparser/-/tree/main/packages/fixparser-plugin-mcp)** - A modern FIX Protocol engine for AI-powered trading agents\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/52471808\" alt=\"Fluid Attacks Logo\" /> **[Fluid Attacks](https://github.com/fluidattacks/mcp)** - Interact with the [Fluid Attacks](https://fluidattacks.com/) API, enabling vulnerability management, organization insights, and GraphQL query execution.\n- <img height=\"12\" width=\"12\" src=\"https://flutterwave.com/favicon.ico\" alt=\"Flutterwave Logo\" /> **[Flutterwave](https://github.com/bajoski34/mcp-flutterwave/tree/main)** - Interact with Flutterwave payment solutions API, to manage transactions, payment links and more.\n- <img height=\"12\" width=\"12\" src=\"https://forevervm.com/icon.png\" alt=\"ForeverVM Logo\" /> **[ForeverVM](https://github.com/jamsocket/forevervm/tree/main/javascript/mcp-server)** - Run Python in a code sandbox.\n- <img height=\"12\" width=\"12\" src=\"https://gcore.com/assets/favicon/favicon-16x16.png\" alt=\"Gcore Logo\" /> **[Gcore](https://github.com/G-Core/gcore-mcp-server)** - Interact with Gcore platform services via LLM assistants, providing unified access to CDN, GPU Cloud & AI Inference, Video Streaming, WAAP, and cloud resources including instances and networks.\n- <img height=\"12\" width=\"12\" src=\"https://app.gibsonai.com/favicon.ico\" alt=\"GibsonAI Logo\" /> **[GibsonAI](https://github.com/GibsonAI/mcp)** - AI-Powered Cloud databases: Build, migrate, and deploy database instances with AI\n- <img height=\"12\" width=\"12\" src=\"https://gitea.com/assets/img/favicon.svg\" alt=\"Gitea Logo\" /> **[Gitea](https://gitea.com/gitea/gitea-mcp)** - Interact with Gitea instances with MCP.\n- <img height=\"12\" width=\"12\" src=\"https://gitee.com/favicon.ico\" alt=\"Gitee Logo\" /> **[Gitee](https://github.com/oschina/mcp-gitee)** - Gitee API integration, repository, issue, and pull request 
management, and more.\n- <img height=\"12\" width=\"12\" src=\"https://cdn.prod.website-files.com/5ee25cbe47310017adf964da/6323888a9b9f4e22a7bc766b_GG%20Favicon.svg\" alt=\"GitGuardian Logo\" /> **[GitGuardian](https://github.com/GitGuardian/gg-mcp)** - GitGuardian official MCP server - Scan projects using GitGuardian's industry-leading API, which features over 500 secret detectors to prevent credential leaks before they reach public repositories. Resolve security incidents directly with rich contextual data for rapid, automated remediation.\n- <img height=\"12\" width=\"12\" src=\"https://github.githubassets.com/assets/GitHub-Mark-ea2971cee799.png\" alt=\"GitHub Logo\" /> **[GitHub](https://github.com/github/github-mcp-server)** - GitHub's official MCP Server.\n- <img height=\"12\" width=\"12\" src=\"https://www.gitkraken.com/wp-content/uploads/2021/03/android-chrome-144x144-1.png\" alt=\"GitKraken Logo\" /> **[GitKraken](https://github.com/gitkraken/gk-cli?tab=readme-ov-file#mcp-server)** - A CLI for interacting with GitKraken APIs. 
Includes an MCP server via `gk mcp` that not only wraps GitKraken APIs, but also Jira, GitHub, GitLab, and more.\n- <img height=\"12\" width=\"12\" src=\"https://gitlab.com/favicon.ico\" alt=\"GitLab Logo\" /> **[GitLab](https://docs.gitlab.com/user/gitlab_duo/model_context_protocol/mcp_server/)** - GitLab's official MCP server enabling AI tools to securely access GitLab project data, manage issues, and perform repository operations via OAuth 2.0.\n- <img height=\"12\" width=\"12\" src=\"https://app.glean.com/images/favicon3-196x196.png\" alt=\"Glean Logo\" /> **[Glean](https://github.com/gleanwork/mcp-server)** - Enterprise search and chat using Glean's API.\n- <img height=\"12\" width=\"12\" src=\"https://cdn.jsdelivr.net/gh/jsdelivr/globalping-media@refs/heads/master/icons/android-chrome-192x192.png\" alt=\"Globalping Logo\" /> **[Globalping](https://github.com/jsdelivr/globalping-mcp-server)** - Access a network of thousands of probes to run network commands like ping, traceroute, mtr, http and DNS resolve.\n- <img height=\"12\" width=\"12\" src=\"https://gnucleus.ai/favicon.ico\" alt=\"gNucleus Logo\" /> **[gNucleus Text-To-CAD](https://github.com/gNucleus/text-to-cad-mcp)** - Generate CAD parts and assemblies from text using gNucleus AI models.\n- <img height=\"12\" width=\"12\" src=\"https://api.gologin.com/favicon.ico\" alt=\"GoLogin Logo\" /> **[GoLogin MCP server](https://github.com/gologinapp/gologin-mcp)** - Manage your GoLogin browser profiles and automation directly through AI conversations!\n- <img height=\"12\" width=\"12\" src=\"https://www.gstatic.com/cgc/favicon.ico\" alt=\"Google Cloud Logo\" /> **[Google Cloud Run](https://github.com/GoogleCloudPlatform/cloud-run-mcp)** - Deploy code to Google Cloud Run\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/3717923?s=200&v=4\" alt=\"Google Maps Platform Logo\" /> **[Google Maps Platform Code 
Assist](https://github.com/googlemaps/platform-ai/tree/main/packages/code-assist)** - Ground agents on fresh, official documentation and code samples for optimal geo-related guidance and code.
official [Gremlin](https://www.gremlin.com) MCP server. Analyze your reliability posture, review recent tests and chaos engineering experiments, and create detailed reports.\n- <img height=\"12\" width=\"12\" src=\"https://greptime.com/favicon.ico\" alt=\"Greptime Logo\" /> **[GreptimeDB](https://github.com/GreptimeTeam/greptimedb-mcp-server)** - Provides AI assistants with a secure and structured way to explore and analyze data in [GreptimeDB](https://github.com/GreptimeTeam/greptimedb).\n- <img height=\"12\" width=\"12\" src=\"https://growi.org/assets/images/favicon.ico\" alt=\"GROWI Logo\" /> **[GROWI](https://github.com/growilabs/growi-mcp-server)** - Official MCP Server to integrate with GROWI APIs.\n- <img height=\"12\" width=\"12\" src=\"https://gyazo.com/favicon.ico\" alt=\"Gyazo Logo\" /> **[Gyazo](https://github.com/nota/gyazo-mcp-server)** - Search, fetch, upload, and interact with Gyazo images, including metadata and OCR data.\n- <img height=\"12\" width=\"12\" src=\"https://cdn.prod.website-files.com/6374050260446c42f94dc90f/63d828be3e13d32ee6973f35_favicon-32x32.png\" alt=\"Harper Logo\" /> **[Harper](https://github.com/HarperDB/mcp-server)** - An MCP server providing an interface for MCP clients to access data within [Harper](https://www.harpersystems.dev/).\n- <img height=\"12\" width=\"12\" src=\"https://www.herokucdn.com/favicons/favicon.ico\" alt=\"Heroku Logo\" /> **[Heroku](https://github.com/heroku/heroku-mcp-server)** - Interact with the Heroku Platform through LLM-driven tools for managing apps, add-ons, dynos, databases, and more.\n- <img height=\"12\" width=\"12\" src=\"https://heyoncall.com/favicon.ico\" alt=\"HeyOnCall Logo\" /> **[HeyOnCall](https://heyoncall.com/blog/mcp-server-for-paging-a-human)** - Page a human, sending critical or non-critical alerts to the free [HeyOnCall](https://heyoncall.com/) iOS or Android apps.\n- <img height=\"12\" width=\"12\" src=\"https://hillnote.com/favicon.ico\" alt=\"Hillnote Logo\" /> 
**[Hillnote](https://github.com/Rajathbail/hillnote-mcp-server)** - Search, edit, save and create documents
Pre-installed data science libraries (pandas, numpy, matplotlib) for AI-powered data analysis and code testing.\n- <img height=\"12\" width=\"12\" src=\"https://static.hsinfrastatic.net/StyleGuideUI/static-3.438/img/sprocket/favicon-32x32.png\" alt=\"HubSpot Logo\" /> **[HubSpot](https://developer.hubspot.com/mcp)** - Connect, manage, and interact with [HubSpot](https://www.hubspot.com/) CRM data\n- <img height=\"12\" width=\"12\" src=\"https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg\" alt=\"HuggingFace Logo\" /> **[Hugging Face](https://huggingface.co/settings/mcp)** - Connect to the Hugging Face Hub APIs programmatically: semantic search for spaces and papers, exploration of datasets and models, and access to all compatible MCP Gradio tool spaces!\n- <img height=\"12\" width=\"12\" src=\"https://hunter.io/favicon.ico\" alt=\"Hunter Logo\" /> **[Hunter](https://github.com/hunter-io/hunter-mcp)** - Interact with the [Hunter API](https://hunter.io) to get B2B data using natural language.\n- <img height=\"12\" width=\"12\" src=\"https://app.hyperbolic.xyz/hyperbolic-logo.svg\" alt=\"Hyperbolic Labs Logo\" /> **[Hyperbolic](https://github.com/HyperbolicLabs/hyperbolic-mcp)** - Interact with Hyperbolic's GPU cloud, enabling agents and LLMs to view and rent available GPUs, SSH into them, and run GPU-powered workloads for you.\n- <img height=\"12\" width=\"12\" src=\"https://hyperbrowser-assets-bucket.s3.us-east-1.amazonaws.com/Hyperbrowser-logo.png\" alt=\"Hyperbrowsers23 Logo\" /> **[Hyperbrowser](https://github.com/hyperbrowserai/mcp)** - [Hyperbrowser](https://www.hyperbrowser.ai/) is the next-generation platform empowering AI agents and enabling effortless, scalable browser automation.\n- **[IBM watsonx.data intelligence](https://github.com/IBM/data-intelligence-mcp-server)** - Find, understand, and work with your data in the watsonx.data intelligence governance & catalog, data quality, data lineage, and data product hub\n- **[IBM 
wxflows](https://github.com/IBM/wxflows/tree/main/examples/mcp/javascript)** - Tool platform by IBM to build, test and deploy tools for any data source\n- <img height=\"12\" width=\"12\" src=\"https://improvedigital.com/favicon.ico\" alt=\"Improve Digital Icon\" /> **[Improve Digital Publisher MCP](https://github.com/azerion/improvedigital-publisher-mcp-server)** - An MCP server that enables publishers to integrate [Improve Digital’s](https://improvedigital.com/) inventory management system with their AI tools or agents.\n- <img height=\"12\" width=\"12\" src=\"https://www.getinboxzero.com/icon.png\" alt=\"Inbox Zero Logo\" /> **[Inbox Zero](https://github.com/elie222/inbox-zero/tree/main/apps/mcp-server)** - AI personal assistant for email [Inbox Zero](https://www.getinboxzero.com)\n- <img height=\"12\" width=\"12\" src=\"https://www.inflectra.com/Favicon.ico\" alt=\"Inflectra Logo\" /> **[Inflectra Spira](https://github.com/Inflectra/mcp-server-spira)** - Connect to your instance of the SpiraTest, SpiraTeam or SpiraPlan application lifecycle management platform by [Inflectra](https://www.inflectra.com)\n- <img height=\"12\" width=\"12\" src=\"https://cdn-web.infobip.com/uploads/2025/05/infobip-symbol-orange.png\" alt=\"Infobip Logo\" /> **[Infobip](https://github.com/infobip/mcp)** - MCP server for integrating [Infobip](https://www.infobip.com/) global cloud communication platform. 
It equips AI agents with communication superpowers, allowing them to send and receive SMS and RCS messages, interact with WhatsApp and Viber, automate communication workflows, and manage customer data, all in a production-ready environment.\n- <img height=\"12\" width=\"12\" src=\"https://inkeep.com/favicon.ico\" alt=\"Inkeep Logo\" /> **[Inkeep](https://github.com/inkeep/mcp-server-python)** - RAG Search over your content powered by [Inkeep](https://inkeep.com)\n- <img height=\"12\" width=\"12\" src=\"https://integration.app/favicon.ico\" alt=\"Integration App Icon\" /> **[Integration App](https://github.com/integration-app/mcp-server)** - Interact with any other SaaS applications on behalf of your customers.\n- <img height=\"12\" width=\"12\" src=\"https://www.ip2location.io/favicon.ico\" alt=\"IP2Location.io Icon\" /> **[IP2Location.io](https://github.com/ip2location/mcp-ip2location-io)** - Interact with IP2Location.io API to retrieve the geolocation information for an IP address.\n- <img height=\"12\" width=\"12\" src=\"https://static.iplocate.io/custom/logo-square-rounded.png\" alt=\"IPLocate Icon\" /> **[IPLocate](https://github.com/iplocate/mcp-server-iplocate)** - Look up IP address geolocation, network information, detect proxies and VPNs, and find abuse contact details using [IPLocate.io](https://www.iplocate.io)\n- <img height=\"12\" width=\"12\" src=\"https://jellyfish.co/favicon.ico\" alt=\"Jellyfish Logo\" /> **[Jellyfish](https://github.com/Jellyfish-AI/jellyfish-mcp)** – Give your AI agent context about your team's software engineering allocations and workflow via the [Jellyfish](https://jellyfish.co) platform\n- <img height=\"12\" width=\"12\" src=\"https://jenkins.io/images/logos/jenkins/jenkins.svg\" alt=\"Jenkins Logo\" /> **[Jenkins](https://plugins.jenkins.io/mcp-server/)** - Official Jenkins MCP Server plugin enabling AI assistants to manage builds, check job statuses, retrieve logs, and integrate with CI/CD pipelines through standardized MCP 
interface.\n- <img height=\"12\" width=\"12\" src=\"https://cdn.simpleicons.org/jetbrains\" /> **[JetBrains](https://www.jetbrains.com/help/idea/mcp-server.html)** – Work on your code with JetBrains IDEs: IntelliJ IDEA, PhpStorm, etc.\n- <img height=\"12\" width=\"12\" src=\"https://speedmedia.jfrog.com/08612fe1-9391-4cf3-ac1a-6dd49c36b276/media.jfrog.com/wp-content/uploads/2019/04/20131046/Jfrog16-1.png\" alt=\"JFrog Logo\" /> **[JFrog](https://github.com/jfrog/mcp-jfrog)** - Model Context Protocol (MCP) Server for the [JFrog](https://jfrog.com/) Platform API, enabling repository management, build tracking, release lifecycle management, and more.\n- <img height=\"12\" width=\"12\" src=\"https://kagi.com/favicon.ico\" alt=\"Kagi Logo\" /> **[Kagi Search](https://github.com/kagisearch/kagimcp)** - Search the web using Kagi's search API\n- 📅 **[Kalendis](https://github.com/kalendis-dev/kalendis-mcp)** - Generate TypeScript clients and API route handlers for the Kalendis scheduling API across multiple frameworks (Next.js, Express, Fastify, NestJS), streamlining integration of availability management and booking functionality.\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/319096?s=48&v=4\" alt=\"Kaltura Logo\" /> **[Kaltura](https://github.com/kaltura/mcp-events)** - Manage [Kaltura Event Platform](https://corp.kaltura.com/blog/best-virtual-event-platform/#what-is-a-virtual-event-platform-0). 
Provide tools and resources for creating, managing, and interacting with Kaltura virtual events.\n- <img height=\"12\" width=\"12\" src=\"https://kash.click/favicon.ico\" alt=\"Kash Logo\" /> **[Kash.click](https://github.com/paracetamol951/caisse-enregistreuse-mcp-server)** - Gives AI access to your sales, clients, orders, tax information, payments, and all the insights on your business\n- <img height=\"12\" width=\"12\" src=\"https://connection.keboola.com/favicon.ico\" alt=\"Keboola Logo\" /> **[Keboola](https://github.com/keboola/keboola-mcp-server)** - Build robust data workflows, integrations, and analytics on a single intuitive platform.\n- <img height=\"12\" width=\"12\" src=\"https://mcp.onkernel.com/favicon.svg\" alt=\"Kernel Logo\" /> **[Kernel](https://github.com/onkernel/kernel-mcp-server)** – Access Kernel's cloud‑based browsers via MCP.\n- <img height=\"12\" width=\"12\" src=\"https://keywordseverywhere.com/favicon.ico\" alt=\"Keywords Everywhere Logo\" /> **[Keywords Everywhere](https://api.keywordseverywhere.com/docs/#/mcp_integration)** – Access SEO data through the official Keywords Everywhere API MCP server.\n- <img height=\"12\" width=\"12\" src=\"https://keywordspeopleuse.com/favicon.ico\" alt=\"KeywordsPeopleUse Logo\" /> **[KeywordsPeopleUse.com](https://github.com/data-skunks/kpu-mcp)** - Find questions people ask online with [KeywordsPeopleUse](https://keywordspeopleuse.com).\n- <img height=\"12\" width=\"12\" src=\"https://kiln.tech/images/animated_logo.svg\" alt=\"Kiln Logo\" /> **[Kiln](https://github.com/Kiln-AI/Kiln)** - A free open-source platform for building production-ready AI systems. 
It supports RAG pipelines, AI agents, MCP tool-calling, evaluations, synthetic data generation, and fine-tuning — all in one unified framework by [Kiln-AI](https://kiln.tech/).\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/4815054\" alt=\"Kintone Logo\" /> **[Kintone](https://github.com/kintone/mcp-server)** - The official local MCP server for [Kintone](https://kintone.com).\n- <img height=\"12\" width=\"12\" src=\"https://kirokuforms.com/favicon.svg\" alt=\"KirokuForms Logo\" /> **[KirokuForms](https://www.kirokuforms.com/ai/mcp)** - [KirokuForms](https://www.kirokuforms.com) is an AI-powered form platform combining professional form building with Human-in-the-Loop (HITL) capabilities. Create custom forms, collect submissions, and integrate human oversight into AI workflows through [MCP integration](https://kirokuforms.com/ai/mcp).\n- <img height=\"12\" width=\"12\" src=\"https://raw.githubusercontent.com/kiteworks/mcp/main/docs/img/kiteworks_logo-small.png\" alt=\"Kiteworks Logo\" /> **[Kiteworks](https://github.com/kiteworks/mcp)** - Official MCP server to interact with the [Kiteworks Private Data Network (PDN) platform](https://kiteworks.com).\n- <img height=\"12\" width=\"12\" src=\"https://raw.githubusercontent.com/klavis-ai/klavis/main/static/klavis-ai.png\" alt=\"Klavis Logo\" /> **[Klavis ReportGen](https://github.com/Klavis-AI/klavis/tree/main/mcp_servers/report_generation)** - Create professional reports from a simple user query.\n- <img height=\"12\" width=\"12\" src=\"https://www.klaviyo.com/media/Favicon-16by16.png\" alt=\"Klaviyo Logo\" /> **[Klaviyo](https://developers.klaviyo.com/en/docs/klaviyo_mcp_server)** - Interact with your [Klaviyo](https://www.klaviyo.com/) marketing data.\n- <img height=\"12\" width=\"12\" src=\"https://platform.kluster.ai/logo-light.svg\" alt=\"kluster.ai Logo\" /> **[kluster.ai](https://docs.kluster.ai/get-started/mcp/overview/)** - kluster.ai provides MCP servers that bring AI services 
directly into your development workflow, including guardrails like hallucination detection.\n- <img height=\"12\" width=\"12\" src=\"https://cdn.prod.website-files.com/6347ea26001f0287c592ff91/649953ef7a9ffe1f3e492b5a_Knit%20Logo.svg\" alt=\"Knit Logo\" /> **[Knit MCP Server](https://developers.getknit.dev/docs/knit-mcp-server-getting-started)** - Production-ready remote MCP servers that enable you to connect with 10000+ tools across CRM, HRIS, Payroll, Accounting, ERP, Calendar, Expense Management, and Chat categories.\n- <img height=\"12\" width=\"12\" src=\"https://knock.app/favicon/favicon-dark.svg\" alt=\"Knock Logo\" /> **[Knock MCP Server](https://github.com/knocklabs/agent-toolkit#model-context-protocol-mcp)** - Send product and customer messaging across email, in-app, push, SMS, Slack, MS Teams.\n- <img height=\"12\" width=\"12\" src=\"https://kumo-sdk-public.s3.us-west-2.amazonaws.com/rfm-colabs/kumo_ai_logo.jpeg\" alt=\"Kumo Logo\" /> **[Kumo](https://github.com/kumo-ai/kumo-rfm-mcp)** - MCP Server to interact with KumoRFM, a foundation model for generating predictions from your relational data.\n- <img height=\"12\" width=\"12\" src=\"https://www.kurrent.io/favicon.ico\" alt=\"Kurrent Logo\" /> **[KurrentDB](https://github.com/kurrent-io/mcp-server)** - This is a simple MCP server to help you explore data and prototype projections faster on top of KurrentDB.\n- <img height=\"12\" width=\"12\" src=\"https://kuzudb.com/favicon.ico\" alt=\"Kuzu Logo\" /> **[Kuzu](https://github.com/kuzudb/kuzu-mcp-server)** - This server enables LLMs to inspect database schemas and execute queries on the provided Kuzu graph database. 
See [blog](https://blog.kuzudb.com/post/2025-03-23-kuzu-mcp-server/) for a debugging use case.\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/187484914\" alt=\"KWDB Logo\" /> **[KWDB](https://github.com/KWDB/kwdb-mcp-server)** - Reading, writing, querying, modifying data, and performing DDL operations with data in your KWDB Database.\n- <img height=\"12\" width=\"12\" src=\"https://kweenkl.com/favicon.ico\" alt=\"kweenkl Logo\" /> **[kweenkl](https://github.com/antoinedelorme/kweenkl-mcp)** - Send push notifications from AI assistants using natural language. Pre-launch demo available with example webhook token.\n- <img height=\"12\" width=\"12\" src=\"https://labelstud.io/favicon-16x16.png\" alt=\"Label Studio Logo\" /> **[Label Studio](https://github.com/HumanSignal/label-studio-mcp-server)** - Open Source data labeling platform.\n- <img src=\"https://avatars.githubusercontent.com/u/188884511?s=48&v=4\" alt=\"Lambda Capture\" width=\"12\" height=\"12\"> **[Lambda Capture](https://github.com/lambda-capture/mcp-server)** - Macroeconomic Forecasts & Semantic Context from Federal Reserve, Bank of England, ECB.\n- <img src=\"https://www.lambdatest.com/resources/images/header/professional-service.svg\" alt=\"LambdaTest MCP server\" width=\"12\" height=\"12\"> **[LambdaTest](https://www.lambdatest.com/mcp)** - LambdaTest MCP Servers ranging from Accessibility, SmartUI, Automation, and HyperExecute allows you to connect AI assistants with your testing workflow, streamlining setup, analyzing failures, and generating fixes to speed up testing and improve efficiency.\n- <img height=\"12\" width=\"12\" src=\"https://langfuse.com/favicon.ico\" alt=\"Langfuse Logo\" /> **[Langfuse Prompt Management](https://github.com/langfuse/mcp-server-langfuse)** - Open-source tool for collaborative editing, versioning, evaluating, and releasing prompts.\n- <img height=\"12\" width=\"12\" src=\"https://laratranslate.com/favicon.ico\" alt=\"Lara Translate 
Logo\" /> **[Lara Translate](https://github.com/translated/lara-mcp)** - MCP Server for Lara Translate API, enabling powerful translation capabilities with support for language detection and context-aware translations.\n- <img height=\"12\" width=\"12\" src=\"https://last9.io/favicon.png\" alt=\"Last9 Logo\" /> **[Last9](https://github.com/last9/last9-mcp-server)** - Seamlessly bring real-time production context—logs, metrics, and traces—into your local environment to auto-fix code faster.\n- <img height=\"12\" width=\"12\" src=\"https://www.launchdarkly.com/favicon.ico\" alt=\"LaunchDarkly Logo\" /> **[LaunchDarkly](https://github.com/launchdarkly/mcp-server)** - LaunchDarkly is a continuous delivery platform that provides feature flags as a service and allows developers to iterate quickly and safely.\n- <img height=\"12\" width=\"12\" src=\"https://www.line.me/favicon-32x32.png\" alt=\"LINE Logo\" /> **[LINE](https://github.com/line/line-bot-mcp-server)** - Integrates the LINE Messaging API to connect an AI Agent to the LINE Official Account.\n- <img height=\"12\" width=\"12\" src=\"https://linear.app/favicon.ico\" alt=\"Linear Logo\" /> **[Linear](https://linear.app/docs/mcp)** - Search, create, and update Linear issues, projects, and comments.\n- <img height=\"12\" width=\"12\" src=\"https://lingo.dev/favicon.ico\" alt=\"Lingo.dev Logo\" /> **[Lingo.dev](https://github.com/lingodotdev/lingo.dev/blob/main/mcp.md)** - Make your AI agent speak every language on the planet, using [Lingo.dev](https://lingo.dev) Localization Engine.\n- <img height=\"12\" width=\"12\" src=\"https://ligo.ertiqah.com/favicon.avif\" alt=\"LiGo Logo\" /> **[LinkedIn MCP Runner](https://github.com/ertiqah/linkedin-mcp-runner)** - Write, edit, and schedule LinkedIn posts right from ChatGPT and Claude with [LiGo](https://ligo.ertiqah.com/).\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/175112039?s=200&v=4\" alt=\"Linkup Logo\" /> 
**[Linkup](https://github.com/LinkupPlatform/js-mcp-server)** - (JS version) MCP server that provides web search capabilities through Linkup's advanced search API. This server enables AI assistants and development tools to perform intelligent web searches with natural language queries.\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/175112039?s=200&v=4\" alt=\"Linkup Logo\" /> **[Linkup](https://github.com/LinkupPlatform/python-mcp-server)** - (Python version) MCP server that provides web search capabilities through Linkup's advanced search API. This server enables AI assistants and development tools to perform intelligent web searches with natural language queries.\n- <img src=\"https://avatars.githubusercontent.com/u/149083471\" alt=\"Lippia.io\" width=\"12\" height=\"12\"> **[Lippia](https://github.com/Lippia-io/Lippia-MCP-Server/blob/main/getting-started.md)** - MCP Server to accelerate Test Automation using Lippia Framework.\n- <img src=\"https://gornschool.com/gorn.png\" alt=\"Lisply\" width=\"12\" height=\"12\"> **[Lisply](https://github.com/gornskew/lisply-mcp)** - Flexible frontend for compliant Lisp-speaking backends.\n- <img height=\"12\" width=\"12\" src=\"https://litmus.io/favicon.ico\" alt=\"Litmus.io Logo\" /> **[Litmus.io](https://github.com/litmusautomation/litmus-mcp-server)** - Official MCP server for configuring [Litmus](https://litmus.io) Edge for Industrial Data Collection, Edge Analytics & Industrial AI.\n- <img height=\"12\" width=\"12\" src=\"https://liveblocks.io/favicon.ico\" alt=\"Liveblocks Logo\" /> **[Liveblocks](https://github.com/liveblocks/liveblocks-mcp-server)** - Ready‑made features for AI & human collaboration—use this to develop your [Liveblocks](https://liveblocks.io) app quicker.\n- <img height=\"12\" width=\"12\" src=\"https://logfire.pydantic.dev/favicon.ico\" alt=\"Logfire Logo\" /> **[Logfire](https://github.com/pydantic/logfire-mcp)** - Provides access to OpenTelemetry traces and metrics 
through Logfire.\n- <img height=\"12\" width=\"12\" src=\"https://make.magicmealkits.com/favicon.ico\" alt=\"Magic Meal Kits Logo\" /> **[Magic Meal Kits](https://github.com/pureugong/mmk-mcp)** - Unleash Make's Full Potential by [Magic Meal Kits](https://make.magicmealkits.com/)\n- <img height=\"12\" width=\"12\" src=\"https://www.mailgun.com/favicon.ico\" alt=\"Mailgun Logo\" /> **[Mailgun](https://github.com/mailgun/mailgun-mcp-server)** - Interact with Mailgun API.\n- <img height=\"12\" width=\"12\" src=\"https://www.mailjet.com/favicon.ico\" alt=\"Mailjet Logo\" /> **[Mailjet](https://github.com/mailgun/mailjet-mcp-server)** - Official MCP server which allows AI agents to interact with contact, campaign, segmentation, statistics, workflow (and more) APIs from [Sinch Mailjet](https://www.mailjet.com).\n- <img height=\"12\" width=\"12\" src=\"https://www.make.com/favicon.ico\" alt=\"Make Logo\" /> **[Make](https://github.com/integromat/make-mcp-server)** - Turn your [Make](https://www.make.com/) scenarios into callable tools for AI assistants.\n- <img height=\"12\" width=\"12\" src=\"https://static-assets.mapbox.com/branding/favicon/v1/favicon.ico\" alt=\"Mapbox Logo\" /> **[Mapbox](https://github.com/mapbox/mcp-server)** - Unlock geospatial intelligence through Mapbox APIs like geocoding, POI search, directions, isochrones and more.\n- <img height=\"12\" width=\"12\" src=\"https://www.mariadb.com/favicon.ico\" alt=\"MariaDB Logo\" /> **[MariaDB](https://github.com/mariadb/mcp)** - A standard interface for managing and querying MariaDB databases, supporting both standard SQL operations and advanced vector/embedding-based search.\n- <img height=\"14\" width=\"14\" src=\"https://raw.githubusercontent.com/rust-mcp-stack/mcp-discovery/refs/heads/main/docs/_media/mcp-discovery-logo.png\" alt=\"mcp-discovery logo\" /> **[MCP Discovery](https://github.com/rust-mcp-stack/mcp-discovery)** - A lightweight CLI tool built in Rust for discovering MCP server capabilities.\n- 
<img height=\"12\" width=\"12\" src=\"https://woocommerce.com/favicon.ico\" alt=\"WooCommerce Logo\" /> **[MCP for WooCommerce](https://github.com/iOSDevSK/mcp-for-woocommerce)** - Connect your WooCommerce store to AI assistants with read-only access to products, categories, reviews, and WordPress content. [WordPress plugin](https://wordpress.org/plugins/mcp-for-woocommerce/)\n- <img height=\"12\" width=\"12\" src=\"https://googleapis.github.io/genai-toolbox/favicons/favicon.ico\" alt=\"MCP Toolbox for Databases Logo\" /> **[MCP Toolbox for Databases](https://github.com/googleapis/genai-toolbox)** - Open source MCP server specializing in easy, fast, and secure tools for Databases. Supports  AlloyDB, BigQuery, Bigtable, Cloud SQL, Dgraph, Looker, MySQL, Neo4j, Postgres, Spanner, and more.\n- <img height=\"12\" width=\"12\" src=\"https://www.meilisearch.com/favicon.ico\" alt=\"Meilisearch Logo\" /> **[Meilisearch](https://github.com/meilisearch/meilisearch-mcp)** - Interact & query with Meilisearch (Full-text & semantic search API)\n- <img height=\"12\" width=\"12\" src=\"https://github.com/nfergu/memalot/blob/main/logo.png?raw=true\" alt=\"Memalot Logo\" /> **[Memalot](https://github.com/nfergu/memalot?tab=readme-ov-file#mcp-server)** - Finds memory leaks in Python programs.\n- <img height=\"12\" width=\"12\" src=\"https://memgraph.com/favicon.png\" alt=\"Memgraph Logo\" /> **[Memgraph](https://github.com/memgraph/ai-toolkit/tree/main/integrations/mcp-memgraph)** - Query your data in [Memgraph](https://memgraph.com/) graph database.\n- <img height=\"12\" width=\"12\" src=\"https://www.mercadolibre.com.ar/favicon.ico\" alt=\"MercadoLibre Logo\" /> **[Mercado Libre](https://mcp.mercadolibre.com/)** - Mercado Libre's official MCP server.\n- <img height=\"12\" width=\"12\" src=\"https://www.mercadopago.com/favicon.ico\" alt=\"MercadoPago Logo\" /> **[Mercado Pago](https://mcp.mercadopago.com/)** - Mercado Pago's official MCP server.\n- <img height=\"12\" width=\"12\" 
src=\"https://metoro.io/static/images/logos/MetoroLogo.png\" alt=\"Metoro Logo\" /> **[Metoro](https://github.com/metoro-io/metoro-mcp-server)** - Query and interact with kubernetes environments monitored by Metoro\n- <img height=\"12\" width=\"12\" src=\"https://knowall.ai/favicon.ico\" alt=\"Microsoft Business Central Logo\" /> **[Microsoft Business Central](https://github.com/knowall-ai/mcp-business-central)** - Manage Dynamics 365 Business Central customers, contacts, sales opportunities, invoices, and vendors\n- <img height=\"12\" width=\"12\" src=\"https://claritystatic.azureedge.net/images/logo.ico\" alt=\"Microsoft Clarity Logo\"/> **[Microsoft Clarity](https://github.com/microsoft/clarity-mcp-server)** - Official MCP Server to get your behavioral analytics data and insights from [Clarity](https://clarity.microsoft.com)\n- <img height=\"12\" width=\"12\" src=\"https://conn-afd-prod-endpoint-bmc9bqahasf3grgk.b01.azurefd.net/releases/v1.0.1735/1.0.1735.4099/commondataserviceforapps/icon.png\" alt=\"Microsoft Dataverse Logo\" /> **[Microsoft Dataverse](https://go.microsoft.com/fwlink/?linkid=2320176)** - Chat over your business data using NL - Discover tables, run queries, retrieve data, insert or update records, and execute custom prompts grounded in business knowledge and context.\n- <img height=\"12\" width=\"12\" src=\"https://learn.microsoft.com/favicon.ico\" alt=\"Microsoft Learn Logo\" /> **[Microsoft Learn Docs](https://github.com/microsoftdocs/mcp)** - An MCP server that provides structured access to Microsoft's official documentation. 
Retrieves accurate, authoritative, and context-aware technical content for code generation, question answering, and workflow grounding.\n- <img height=\"12\" width=\"12\" src=\"https://statics.teams.microsoft.com/hashedassets/favicon/prod/favicon-9f45b466.ico\" alt=\"Microsoft Teams Logo\" /> **[Microsoft Teams](https://devblogs.microsoft.com/microsoft365dev/announcing-the-updated-teams-ai-library-and-mcp-support/)** - Official Microsoft Teams AI Library with MCP support enabling advanced agent orchestration, multi-agent collaboration, and seamless integration with Teams messaging and collaboration features.\n- <img height=\"12\" width=\"12\" src=\"https://milvus.io/favicon-32x32.png\" /> **[Milvus](https://github.com/zilliztech/mcp-server-milvus)** - Search, Query and interact with data in your Milvus Vector Database.\n- <img src=\"https://www.mimilabs.ai/logos/mimilabsSquare.svg\" alt=\"mimilabs\" width=\"12\" height=\"12\"> **[mimilabs](https://www.mimilabs.ai/mcp)** - A US healthcare data discovery guide for 50+ gov sources and thousands of publicly available US healthcare datasets regarding gov-funded programs, policies, drug pricings, clinical trials, etc.\n- <img height=\"12\" width=\"12\" src=\"https://cdn.mxpnl.com/marketing-site/static/favicons/favicon-32x32.png\" alt=\"Mixpanel Logo\" /> **[Mixpanel](https://docs.mixpanel.com/docs/features/mcp)** - Query and analyze your product analytics data through natural language. 
This Mixpanel MCP connects AI assistants to your Mixpanel workspace, enabling conversational access to user behavior insights, funnels, retention analysis, and custom reports.\n- <img src=\"https://avatars.githubusercontent.com/u/94089762?s=48&v=4\" alt=\"Mobb\" width=\"12\" height=\"12\"> **[Mobb](https://github.com/mobb-dev/bugsy?tab=readme-ov-file#model-context-protocol-mcp-server)** - The [Mobb Vibe Shield](https://vibe.mobb.ai/) MCP server identifies and remediates vulnerabilities in both human and AI-written code, ensuring your applications remain secure without slowing development.\n- <img height=\"12\" width=\"12\" src=\"https://console.gomomento.com/favicon.ico\" /> **[Momento](https://github.com/momentohq/mcp-momento)** - Momento Cache lets you quickly improve your performance, reduce costs, and handle load at any scale.\n- <img height=\"12\" width=\"12\" src=\"https://www.monday.com/favicon.ico\" alt=\"Monday.com Logo\" /> **[Monday.com](https://github.com/mondaycom/mcp)** - Interact with Monday.com boards, items, accounts and work forms.\n- <img height=\"12\" width=\"12\" src=\"https://www.mongodb.com/favicon.ico\" /> **[MongoDB](https://github.com/mongodb-js/mongodb-mcp-server)** - Both MongoDB Community Server and MongoDB Atlas are supported.\n- <img height=\"12\" width=\"12\" src=\"https://moorcheh.ai/Moorcheh-mcp.ico\" alt=\"Moorcheh Logo\" /> **[Moorcheh](https://github.com/moorcheh-ai/moorcheh-mcp)** - Provides seamless integration with Moorcheh's Embedding, Vector Store, Search, and Gen AI Answer services.\n- <img height=\"12\" width=\"12\" src=\"https://www.motherduck.com/favicon.ico\" alt=\"MotherDuck Logo\" /> **[MotherDuck](https://github.com/motherduckdb/mcp-server-motherduck)** - Query and analyze data with MotherDuck and local DuckDB\n- <img height=\"12\" width=\"12\" src=\"https://docs.mulesoft.com/_/img/favicon.ico\" alt=\"Mulesoft Logo\" /> **[Mulesoft](https://www.npmjs.com/package/@mulesoft/mcp-server)** - Build, deploy, and manage 
MuleSoft applications with natural language, directly inside any compatible IDE.\n- <img height=\"12\" width=\"12\" src=\"https://www.multiplayer.app/favicon-32x32.png\" alt=\"Multiplayer Logo\" /> **[Multiplayer](https://www.multiplayer.app/docs/ai/mcp-server)** - Analyze your full stack session recordings easily. Record a bug with Multiplayer, analyze and fix it with LLM\n- <img height=\"12\" width=\"12\" src=\"https://raw.githubusercontent.com/NangoHQ/nango/refs/heads/master/docs/images/logo/logo-light-mode.svg\" alt=\"Nango Logo\" /> **[Nango](https://nango.dev/docs/guides/use-cases/ai-tool-calling)** - Integrate your AI agent with 500+ APIs: Auth, custom tools, and observability. Open-source.\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/38020270\" alt=\"NanoVMs Logo\" /> **[NanoVMs](https://github.com/nanovms/ops-mcp)** - Easily Build and Deploy unikernels to any cloud.\n- <img height=\"12\" width=\"12\" src=\"https://needle-ai.com/images/needle-logo-orange-2-rounded.png\" alt=\"Needle AI Logo\" /> **[Needle](https://github.com/needle-ai/needle-mcp)** - Production-ready RAG out of the box to search and retrieve data from your own documents.\n- <img height=\"12\" width=\"12\" src=\"https://neo4j.com/favicon.ico\" alt=\"Neo4j Logo\" /> **[Neo4j](https://github.com/neo4j-contrib/mcp-neo4j/)** - Neo4j graph database server (schema + read/write-cypher) and separate graph database backed memory\n- <img height=\"12\" width=\"12\" src=\"https://knowall.ai/favicon.ico\" alt=\"Neo4j Agent Memory Logo\" /> **[Neo4j Agent Memory](https://github.com/knowall-ai/mcp-neo4j-agent-memory)** - Memory management for AI agents using Neo4j knowledge graphs\n- <img height=\"12\" width=\"12\" src=\"https://neo4j.com/favicon.ico\" alt=\"Neo4j Logo\" /> **[Neo4j GDS](https://github.com/neo4j-contrib/gds-agent)** - Neo4j graph data science server with comprehensive graph algorithms that enables complex graph reasoning and Q&A.\n- <img height=\"12\" 
width=\"12\" src=\"https://avatars.githubusercontent.com/u/183852044?s=48&v=4\" alt=\"Neon Logo\" /> **[Neon](https://github.com/neondatabase/mcp-server-neon)** - Interact with the Neon serverless Postgres platform\n- <img height=\"12\" width=\"12\" src=\"https://app.usenerve.com/favicon.ico\" alt=\"Nerve Logo\" /> **[Nerve](https://github.com/nerve-hq/nerve-mcp-server)** - Search and Act on all your company data across all your SaaS apps via [Nerve](https://www.usenerve.com/)\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/370544\" alt=\"NetApp Logo\" /> **[NetApp](https://github.com/NetApp/mcp)** - Query metrics, manage volumes, and search across your NetApp systems and services.\n- <img height=\"12\" width=\"12\" src=\"https://www.netdata.cloud/favicon-32x32.png\" alt=\"Netdata Logo\" /> **[Netdata](https://github.com/netdata/netdata/blob/master/src/web/mcp/README.md)** - Discovery, exploration, reporting and root cause analysis using all observability data, including metrics, logs, systems, containers, processes, and network connections\n- <img height=\"12\" width=\"12\" src=\"https://www.netlify.com/favicon/icon.svg\" alt=\"Netlify Logo\" /> **[Netlify](https://docs.netlify.com/welcome/build-with-ai/netlify-mcp-server/)** - Create, build, deploy, and manage your websites with Netlify web platform.\n- <img height=\"12\" width=\"12\" src=\"https://www.thenile.dev/favicon.ico\" alt=\"Nile Logo\" /> **[Nile](https://github.com/niledatabase/nile-mcp-server)** - An MCP server that talks to Nile - Postgres re-engineered for B2B apps. 
Manage and query databases, tenants, users, auth using LLMs\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/208441832?s=400&v=4\" alt=\"Nodit Logo\" /> **[Nodit](https://github.com/noditlabs/nodit-mcp-server)** - Official Nodit MCP Server enabling access to multi-chain RPC Nodes and Data APIs for blockchain data.\n- <img height=\"12\" width=\"12\" src=\"https://app.norman.finance/favicons/favicon-32x32.png\" alt=\"Norman Logo\" /> **[Norman Finance](https://github.com/norman-finance/norman-mcp-server)** - MCP server for managing accounting and taxes with Norman Finance.\n- <img height=\"12\" width=\"12\" src=\"https://notifly.tech/favicon.ico\" alt=\"Notifly Logo\" /> **[Notifly](https://github.com/notifly-tech/notifly-mcp-server)** - Notifly MCP Server that enables AI agents to provide real-time, trusted Notifly documentation and SDK code examples for seamless integrations.\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/4792552?s=200&v=4\" alt=\"Notion Logo\" /> **[Notion](https://github.com/makenotion/notion-mcp-server#readme)** - This project implements an MCP server for the Notion API.\n- <img height=\"12\" width=\"12\" src=\"https://www.nutrient.io/assets/images/logos/nutrient.svg\" alt=\"Nutrient Logo\" /> **[Nutrient](https://github.com/PSPDFKit/nutrient-dws-mcp-server)** - Create, Edit, Sign, Extract Documents using Natural Language\n- <img height=\"12\" width=\"12\" src=\"https://nx.dev/favicon/favicon.svg\" alt=\"Nx Logo\" /> **[Nx](https://github.com/nrwl/nx-console/blob/master/apps/nx-mcp)** - Makes [Nx's understanding](https://nx.dev/features/enhance-AI) of your codebase accessible to LLMs, providing insights into the codebase architecture, project relationships and runnable tasks thus allowing AI to make precise code suggestions.\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/82347605?s=48&v=4\" alt=\"OceanBase Logo\" /> 
**[OceanBase](https://github.com/oceanbase/mcp-oceanbase)** - MCP Server for OceanBase database and its tools\n- <img height=\"12\" width=\"12\" src=\"https://docs.octagonagents.com/logo.svg\" alt=\"Octagon Logo\" /> **[Octagon](https://github.com/OctagonAI/octagon-mcp-server)** - Deliver real-time investment research with extensive private and public market data.\n- <img height=\"12\" width=\"12\" src=\"https://octoeverywhere.com/img/logo.png\" alt=\"OctoEverywhere Logo\" /> **[OctoEverywhere](https://github.com/OctoEverywhere/mcp)** - A 3D Printing MCP server that allows for querying for live state, webcam snapshots, and 3D printer control.\n- <img height=\"12\" width=\"12\" src=\"https://raw.githubusercontent.com/OctopusDeploy/mcp-server/refs/heads/main/images/logo.svg\" alt=\"Octopus Deploy\" /> **[Octopus Deploy](https://github.com/OctopusDeploy/mcp-server)** - Official MCP server for querying, inspecting, and managing your [Octopus Deploy](https://octopus.com/) instance.\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/211697972\" alt=\"Offorte Logo\" /> **[Offorte](https://github.com/offorte/offorte-mcp-server#readme)** - Offorte Proposal Software official MCP server enables creation and sending of business proposals.\n- <img height=\"12\" width=\"12\" src=\"https://maps.olakrutrim.com/favicon.ico\" alt=\"Ola Maps\" /> **[OlaMaps](https://pypi.org/project/ola-maps-mcp-server)** - Official Ola Maps MCP Server for services like geocode, directions, place details and many more.\n- <img height=\"12\" width=\"12\" src=\"https://www.olostep.com/favicon.ico\" alt=\"Olostep\" /> **[Olostep](https://github.com/olostep/olostep-mcp-server)** - Search, scrape and crawl content from web. 
Real-time results in clean markdown.\n- **[OMOP MCP](https://github.com/OHNLP/omop_mcp)** - Map clinical terminology to OMOP concepts using LLMs for healthcare data standardization.\n- <img height=\"12\" width=\"12\" src=\"https://static.onlyoffice.com/images/favicon.ico\" alt=\"ONLYOFFICE DocSpace\" /> **[ONLYOFFICE DocSpace](https://github.com/ONLYOFFICE/docspace-mcp)** - Interact with [ONLYOFFICE DocSpace](https://www.onlyoffice.com/docspace.aspx) API to create rooms, manage files and folders.\n- <img height=\"12\" width=\"12\" src=\"https://op.gg/favicon.ico\" alt=\"OP.GG Logo\" /> **[OP.GG](https://github.com/opgginc/opgg-mcp)** - Access real-time gaming data across popular titles like League of Legends, TFT, and Valorant, offering champion analytics, esports schedules, meta compositions, and character statistics.\n- <img height=\"12\" width=\"12\" src=\"https://open-metadata.org/favicon.ico\" alt=\"OpenMetadata\" /> **[OpenMetadata](https://open-metadata.org/mcp)** - The first Enterprise-grade MCP server for metadata\n- <img height=\"12\" width=\"12\" src=\"https://opensearch.org/wp-content/uploads/2025/01/opensearch_mark_default.svg\" alt=\"OpenSearch Logo\" /> **[OpenSearch](https://github.com/opensearch-project/opensearch-mcp-server-py)** -  MCP server that enables AI agents to perform search and analytics use cases on data stored in [OpenSearch](https://opensearch.org/).\n- <img height=\"12\" width=\"12\" src=\"https://app.opslevel.com/favicon.ico\" alt=\"OpsLevel\" /> **[OpsLevel](https://github.com/opslevel/opslevel-mcp)** - Official MCP Server for [OpsLevel](https://www.opslevel.com).\n- <img height=\"12\" width=\"12\" src=\"https://optuna.org/assets/img/favicon.ico\" alt=\"Optuna Logo\" /> **[Optuna](https://github.com/optuna/optuna-mcp)** - Official MCP server enabling seamless orchestration of hyperparameter search and other optimization tasks with [Optuna](https://optuna.org/).\n- <img height=\"12\" width=\"12\" 
src=\"https://raw.githubusercontent.com/oracle/mcp/refs/heads/main/oracle.svg\" alt=\"Oracle Logo\" /> **[Oracle](https://docs.oracle.com/en/database/oracle/sql-developer-command-line/25.2/sqcug/starting-and-managing-sqlcl-mcp-server.html#GUID-5F916B5D-8670-42BD-9F8B-D3D2424EC47E)** - Official [Oracle Database: SQLcl ](https://www.oracle.com/database/sqldeveloper/technologies/sqlcl/download/) MCP server enabling all access to any Oracle Database via native MCP support directly in SQLcl.\n- <img height=\"12\" width=\"12\" src=\"https://orshot.com/brand/favicon.svg\" alt=\"Orshot Logo\" /> **[Orshot](https://github.com/rishimohan/orshot-mcp-server)** - Official [Orshot](https://orshot.com) MCP server to dynamically generate images from custom design templates.\n- <img height=\"12\" width=\"12\" src=\"https://oxylabs.io/favicon.ico\" alt=\"Oxylabs Logo\" /> **[Oxylabs](https://github.com/oxylabs/oxylabs-mcp)** - Scrape websites with Oxylabs Web API, supporting dynamic rendering and parsing for structured data extraction.\n- <img height=\"12\" width=\"12\" src=\"https://developer.paddle.com/favicon.svg\" alt=\"Paddle Logo\" /> **[Paddle](https://github.com/PaddleHQ/paddle-mcp-server)** - Interact with the Paddle API. Manage product catalog, billing and subscriptions, and reports.\n- **[PaddleOCR](https://paddlepaddle.github.io/PaddleOCR/latest/en/version3.x/deployment/mcp_server.html)** - An MCP server that brings enterprise-grade OCR and document parsing capabilities to AI applications.\n- <img height=\"12\" width=\"12\" src=\"https://cdn.brandfolder.io/YX9ETPCP/at/266537g8kh6mmvt24jvsjb/P-GreenRGB.svg\" alt=\"PagerDuty Logo\" /> **[PagerDuty](https://github.com/PagerDuty/pagerduty-mcp-server)** - Interact with your PagerDuty account, allowing you to manage incidents, services, schedules, and more directly from your MCP-enabled client.\n- **[Pagos](https://github.com/pagos-ai/pagos-mcp)** - Interact with the Pagos API. 
Query Credit Card BIN Data with more to come.\n- <img height=\"12\" width=\"12\" src=\"https://paiml.com/favicon.ico\" alt=\"PAIML Logo\" /> **[PAIML MCP Agent Toolkit](https://github.com/paiml/paiml-mcp-agent-toolkit)** - Professional project scaffolding toolkit with zero-configuration AI context generation, template generation for Rust/Deno/Python projects, and hybrid neuro-symbolic code analysis.\n- <img src=\"https://cdn.bfldr.com/7GK1OJLK/at/kq7cwt4vkw5m2x9s4gkvbf7g/android-chrome-512x512-favicon.png?auto=webp&format=png&width=12&height=12\" width=\"12\" height=\"12\" alt=\"PandaDoc\"> **[PandaDoc](https://developers.pandadoc.com/docs/use-pandadoc-mcp-server)** - Configure AI development tools to connect to PandaDoc's Model Context Protocol server and leverage AI-powered PandaDoc integrations.\n- <img height=\"12\" width=\"12\" src=\"https://app.paperinvest.io/favicon.svg\" alt=\"Paper Logo\" /> **[Paper](https://github.com/paperinvest/mcp-server)** - Realistic paper trading platform with market simulation, 22 broker emulations, and professional tools for risk-free trading practice. First trading platform with MCP integration.\n- <img height=\"12\" width=\"12\" src=\"https://parallel.ai/favicon.ico\" alt=\"Parallel Logo\" /> **[Parallel Task MCP](https://github.com/parallel-web/task-mcp)** - Initiate Deep Research and Batch Tasks\n- **[Patronus AI](https://github.com/patronus-ai/patronus-mcp-server)** - Test, evaluate, and optimize AI agents and RAG apps\n- <img height=\"12\" width=\"12\" src=\"https://mcp.paubox.com/paubox.png\" alt=\"Paubox Logo\" /> **[Paubox](https://mcp.paubox.com)** - Official MCP server which allows AI agents to interact with Paubox Email API. 
HITRUST certified.\n- <img height=\"12\" width=\"12\" src=\"https://www.paypalobjects.com/webstatic/icon/favicon.ico\" alt=\"PayPal Logo\" /> **[PayPal](https://mcp.paypal.com)** - PayPal's official MCP server.\n- <img height=\"12\" width=\"12\" src=\"https://www.foxit.com/favicon.ico\" alt=\"Foxit Logo\" /> **[PDFActionInspector](https://github.com/foxitsoftware/PDFActionInspector/tree/develop)** - A Model Context Protocol server for extracting and analyzing JavaScript Actions from PDF files. Provides comprehensive security analysis to detect malicious PDF behaviors, hidden scripts, and potential security threats through AI-assisted risk assessment.\n- <img height=\"12\" width=\"12\" src=\"https://ww2-secure.pearl.com/static/pearl/pearl-logo.svg\" alt=\"Pearl Logo\" /> **[Pearl](https://github.com/Pearl-com/pearl_mcp_server)** - Official MCP Server to interact with Pearl API. Connect your AI Agents with 12,000+ certified experts instantly.\n- <img height=\"12\" width=\"12\" src=\"https://www.perplexity.ai/favicon.ico\" alt=\"Perplexity Logo\" /> **[Perplexity](https://github.com/ppl-ai/modelcontextprotocol)** - An MCP server that connects to Perplexity's Sonar API, enabling real-time web-wide research in conversational AI.\n- <img height=\"12\" width=\"12\" src=\"https://github.com/mattjoyce.png\" alt=\"Persona Sessions Logo\" /> **[Persona Sessions](https://github.com/mattjoyce/mcp-persona-sessions)** - Enable AI assistants to conduct structured, persona-driven sessions including interview preparation, personal reflection, and coaching conversations with built-in timer and evaluation.\n- <img height=\"12\" width=\"12\" src=\"https://www.pga.com/favicon.ico\" alt=\"PGA Logo\" /> **[PGA (Golf)](https://mcp.pga.com)** - PGA's official MCP Server for all things golf-related. 
Find a coach, play golf, improve your game, and more.\n- <img height=\"12\" width=\"12\" src=\"https://www.pgyer.com/favicon.ico\" alt=\"PGYER Logo\" /> **[PGYER](https://github.com/PGYER/pgyer-mcp-server)** - MCP Server for [PGYER](https://www.pgyer.com/) platform, supports uploading, querying apps, etc.\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/54333248\" /> **[Pinecone](https://github.com/pinecone-io/pinecone-mcp)** - [Pinecone](https://docs.pinecone.io/guides/operations/mcp-server)'s developer MCP Server assist developers in searching documentation and managing data within their development environment.\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/54333248\" /> **[Pinecone Assistant](https://github.com/pinecone-io/assistant-mcp)** - Retrieves context from your [Pinecone Assistant](https://docs.pinecone.io/guides/assistant/mcp-server) knowledge base.\n- <img height=\"12\" width=\"12\" src=\"https://www.pinmeto.com/hubfs/PinMeTo-Favicon.png\" alt=\"PinMeTo logo\" /> **[PinMeTo](https://github.com/PinMeTo/pinmeto-location-mcp)** - MCP server that enables users with authorized credentials to unlock their location data.\n- <img height=\"12\" width=\"12\" src=\"https://pipedream.com/favicon.ico\" alt=\"Pipedream Logo\" /> **[Pipedream](https://github.com/PipedreamHQ/pipedream/tree/master/modelcontextprotocol)** - Connect with 2,500 APIs with 8,000+ prebuilt tools.\n- <img height=\"12\" width=\"12\" src=\"https://storage.googleapis.com/plainly-static-data/plainly%20-%20logo.png\" alt=\"PlainlyVideos Logo\" /> **[Plainly Videos](https://github.com/plainly-videos/mcp-server)** - The official MCP server for [Plainly Videos](https://plainlyvideos.com), allowing users to browse designs and projects, as well as render videos using various LLM clients.\n- <img height=\"12\" width=\"12\" src=\"https://playcanvas.com/static-assets/images/icons/favicon.png\" alt=\"PlayCanvas Logo\" /> 
**[PlayCanvas](https://github.com/playcanvas/editor-mcp-server)** - Create interactive 3D web apps with the PlayCanvas Editor.\n- <img height=\"12\" width=\"12\" src=\"https://playwright.dev/img/playwright-logo.ico\" alt=\"Playwright Logo\" /> **[Playwright](https://github.com/microsoft/playwright-mcp)** — Browser automation MCP server using Playwright to run tests, navigate pages, capture screenshots, scrape content, and automate web interactions reliably.\n- <img height=\"12\" width=\"12\" src=\"https://www.plugged.in/favicon.ico\" alt=\"Plugged.in Logo\" /> **[Plugged.in](https://github.com/VeriTeknik/pluggedin-mcp)** - A comprehensive proxy that combines multiple MCP servers into a single MCP. It provides discovery and management of tools, prompts, resources, and templates across servers, plus a playground for debugging when building MCP servers.\n- <img height=\"12\" width=\"12\" src=\"https://p-link.io/favicon.ico\" alt=\"P-Link.io Logo\" /> **[P-Link.io](https://github.com/paracetamol951/P-Link-MCP)** - HTTP 402 Protocol implementation on Solana network. 
Sending & receiving payments for agents\n- <img height=\"12\" width=\"12\" src=\"https://polymarket.com/favicon.ico\" alt=\"Polymarket Logo\" /> **[Polymarket](https://github.com/ozgureyilmaz/polymarket-mcp)** - Real-time prediction market data from Polymarket - search markets, analyze prices, identify trading opportunities.\n- <img height=\"12\" width=\"12\" src=\"https://plusai.com/622ffb3448f15ce7a33c6a2b/652d81ccc31a7d50861db0ef_plus_favicon.ico\" alt=\"Plus AI Logo\" /> **[Plus AI](https://plusai.com/features/mcp)** - A Model Context Protocol (MCP) server for automatically generating professional PowerPoint and Google Slides presentations using the [Plus AI](https://plusai.com/) presentation API.\n- <img height=\"12\" width=\"12\" src=\"https://github.com/port-labs/port-mcp-server/blob/main/assets/port_symbol_white.svg\" alt=\"Port Logo\" /> **[Port IO](https://github.com/port-labs/port-mcp-server)** - Access and manage your software catalog to improve service quality and compliance.\n- **[PostHog](https://github.com/posthog/mcp)** - Interact with PostHog analytics, feature flags, error tracking and more with the official PostHog MCP server.\n- <img height=\"12\" width=\"12\" src=\"https://postidentity.com/favicon.ico\" alt=\"PostIdentity Logo\" /> **[PostIdentity](https://github.com/PostIdentity/mcp-server)** - Generate AI-powered social media posts from any AI assistant. 
Manage identities, create posts, track referrals, and browse marketplace templates, powered by [PostIdentity](https://postidentity.com).\n- **[Postman API](https://github.com/postmanlabs/postman-api-mcp)** - Manage your Postman resources using the [Postman API](https://www.postman.com/postman/postman-public-workspace/collection/i2uqzpp/postman-api).\n- <img height=\"12\" width=\"12\" src=\"https://powerdrill.ai/_next/static/media/powerdrill.0fa27d00.webp\" alt=\"Powerdrill Logo\" /> **[Powerdrill](https://github.com/powerdrillai/powerdrill-mcp)** - An MCP server that provides tools to interact with Powerdrill datasets, enabling smart AI data analysis and insights.\n- <img height=\"12\" width=\"12\" src=\"https://www.pre.dev/predevlogowhitebackground.png\" alt=\"pre.dev Logo\" /> **[pre.dev Architect](https://docs.pre.dev/mcp-server)** - 10x your coding agent by keeping it on track with pre.dev.\n- <img height=\"12\" width=\"12\" src=\"https://devdocs.prestashop-project.org/images/favicon.png\" alt=\"PrestaShop Logo\" /> **[PrestaShop.com](https://docs.mcp.prestashop.com/)** - Manage your PrestaShop store with AI Assistant by using the official PrestaShop MCP server.\n- <img height=\"12\" width=\"12\" src=\"https://www.prisma.io/images/favicon-32x32.png\" alt=\"Prisma Logo\" /> **[Prisma](https://www.prisma.io/docs/postgres/integrations/mcp-server)** - Create and manage Prisma Postgres databases\n- <img height=\"12\" width=\"12\" src=\"https://probe.dev/favicon.ico\" alt=\"Probe.dev Logo\" /> **[Probe.dev](https://docs.probe.dev/guides/mcp-integration)** - Comprehensive media analysis and validation powered by [Probe.dev](https://probe.dev). 
Hosted MCP server with FFprobe, MediaInfo, and Probe Report analysis capabilities.\n- <img height=\"12\" width=\"12\" src=\"https://framerusercontent.com/images/FGzpihs4MxmSJhyGZ6n7f2Xj0.png\" alt=\"Prode.ai Logo\" /> **[ProdE](https://github.com/CuriousBox-AI/ProdE-mcp)** - Your 24/7 production engineer that preserves context across multiple codebases.\n- <img height=\"12\" width=\"12\" src=\"https://programintegrity.org/wp-content/uploads/2024/07/PIA-Favicon.svg\" alt=\"Program Integrity Alliance (PIA) Logo\" /> **[Program Integrity Alliance (PIA)](https://github.com/Program-Integrity-Alliance/pia-mcp-local)** - Local and Hosted MCP servers providing AI-friendly access to U.S. Government Open Datasets. Also available on [Docker MCP Catalog](https://hub.docker.com/mcp/explore?search=PIA). See [our website](https://programintegrity.org) for more details.\n- <img height=\"12\" width=\"12\" src=\"https://github.com/newtype-01/prompthouse-mcp/raw/main/prompthouse-logo-12x12.png\" alt=\"PromptHouse Logo\" /> **[PromptHouse](https://github.com/newtype-01/prompthouse-mcp)** - Personal prompt library with MCP integration for AI clients.\n- <img height=\"12\" width=\"12\" src=\"https://docs.speedscale.com/img/favicon.ico\" alt=\"proxymock Logo\" /> **[proxymock](https://docs.speedscale.com/proxymock/reference/mcp/)** - An MCP server that automatically generates tests and mocks by recording a live app.\n- <img src=\"https://www.pubnub.com/favicon/favicon-32x32.png\" alt=\"PubNub\" width=\"12\" height=\"12\"> **[PubNub](https://github.com/pubnub/pubnub-mcp-server)** - Retrieves context for developing with PubNub SDKs and calling APIs.\n- <img height=\"12\" width=\"12\" src=\"https://www.pulumi.com/images/favicon.ico\" alt=\"Pulumi Logo\" /> **[Pulumi](https://github.com/pulumi/mcp-server)** - Deploy and manage cloud infrastructure using [Pulumi](https://pulumi.com).\n- <img height=\"12\" width=\"12\" src=\"https://pure.md/favicon.png\" alt=\"Pure.md Logo\" /> 
**[Pure.md](https://github.com/puremd/puremd-mcp)** - Reliably access web content in markdown format with [pure.md](https://pure.md) (bot detection avoidance, proxy rotation, and headless JS rendering built in).\n- <img height=\"12\" width=\"12\" src=\"https://put.io/images/favicon.ico\" alt=\"Put.io Logo\" /> **[Put.io](https://github.com/putdotio/putio-mcp-server)** - Interact with your Put.io account to download torrents.\n- <img height=\"12\" width=\"12\" src=\"https://qdrant.tech/img/brand-resources-logos/logomark.svg\" /> **[Qdrant](https://github.com/qdrant/mcp-server-qdrant/)** - Implement semantic memory layer on top of the Qdrant vector search engine\n- <img src=\"https://avatars.githubusercontent.com/u/18053493?s=200&v=4\" alt=\"Qonto\" width=\"12\" height=\"12\"> **[Qonto](https://github.com/qonto/qonto-mcp-server)** - Access and interact with your Qonto account through LLMs using MCP.\n- <img src=\"https://api.qoretechnologies.com/api/public/apps/Qorus/qorus-logo.svg\" alt=\"Qorus\" width=\"12\" height=\"12\"> **[Qorus](https://qoretechnologies.com/manual/qorus/current/qorus/sysarch.html#mcp_server)** - Connect to any application, system, or technology and automate your business processes without coding and with AI\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/3912814\" alt=\"QuantConnect Logo\" /> **[QuantConnect](https://github.com/QuantConnect/mcp-server)** - Interact with your [QuantConnect](https://www.quantconnect.com/) account to update projects, write strategies, run backtests, and deploy strategies to production live-trading.\n- **[Quickchat AI](https://github.com/incentivai/quickchat-ai-mcp)** - Launch your conversational [Quickchat AI](https://quickchat.ai) agent as an MCP to give AI apps real-time access to its Knowledge Base and conversational capabilities\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/165178062\" alt=\"Ragie Logo\" /> 
**[Ragie](https://github.com/ragieai/ragie-mcp-server/)** - Retrieve context from your [Ragie](https://www.ragie.ai) (RAG) knowledge base connected to integrations like Google Drive, Notion, JIRA and more.\n- <img height=\"12\" width=\"12\" src=\"https://www.ramp.com/favicon.ico\" /> **[Ramp](https://github.com/ramp-public/ramp-mcp)** - Interact with [Ramp](https://ramp.com)'s Developer API to run analysis on your spend and gain insights leveraging LLMs\n- **[Raygun](https://github.com/MindscapeHQ/mcp-server-raygun)** - Interact with your crash reporting and real user monitoring data on your Raygun account\n- <img height=\"12\" width=\"12\" src=\"https://framerusercontent.com/images/CU1m0xFonUl76ZeaW0IdkQ0M.png\" alt=\"Razorpay Logo\" /> **[Razorpay](https://github.com/razorpay/razorpay-mcp-server)** - Razorpay's official MCP server\n- <img height=\"12\" width=\"12\" src=\"https://www.recraft.ai/favicons/icon.svg\" alt=\"Recraft Logo\" /> **[Recraft](https://github.com/recraft-ai/mcp-recraft-server)** - Generate raster and vector (SVG) images using [Recraft](https://recraft.ai). 
Also you can edit, upscale images, create your own styles, and vectorize raster images\n- <img height=\"12\" width=\"12\" src=\"https://www.redhat.com/favicon.ico\" alt=\"Red Hat Logo\" /> **[Red Hat Insights](https://github.com/RedHatInsights/insights-mcp)** - Interact with [Red Hat Insights](https://www.redhat.com/en/technologies/management/insights) - build images, manage vulnerabilities, or view targeted recommendations.\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/1529926\" alt=\"Redis Logo\" /> **[Redis](https://github.com/redis/mcp-redis/)** - The Redis official MCP Server offers an interface to manage and search data in Redis.\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/1529926\" alt=\"Redis Logo\" /> **[Redis Cloud API](https://github.com/redis/mcp-redis-cloud/)** - The Redis Cloud API MCP Server allows you to manage your Redis Cloud resources using natural language.\n- <img src=\"https://avatars.githubusercontent.com/u/149024635\" alt=\"Reexpress\" width=\"12\" height=\"12\"> **[Reexpress](https://github.com/ReexpressAI/reexpress_mcp_server)** - Enable Similarity-Distance-Magnitude statistical verification for your search, software, and data science workflows\n- <img height=\"12\" width=\"12\" src=\"https://cdn.prod.website-files.com/68a872edf3df6064de547670/68b7f089c45a6083ce25acb1_reflag-favicon-32.png\" alt=\"Reflag\" /> **[Reflag](https://github.com/reflagcom/javascript/tree/main/packages/cli#model-context-protocol)** - Create and manage feature flags using [Reflag](https://reflag.com)\n- <img height=\"12\" width=\"12\" src=\"https://www.reltio.com/wp-content/uploads/2024/03/cropped-cropped-Reltio_Light_Mode_Dark_Mode_Favicon-270x270.png\" alt=\"Reltio Logo\" /> **[Reltio](https://github.com/reltio-ai/reltio-mcp-server)** - A lightweight, plugin-based MCP server designed to perform advanced entity matching with language models in Reltio environments.\n- <img height=\"12\" 
width=\"12\" src=\"https://www.rember.com/favicon.ico\" alt=\"Rember Logo\" /> **[Rember](https://github.com/rember/rember-mcp)** - Create spaced repetition flashcards in [Rember](https://rember.com) to remember anything you learn in your chats\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/114033652\" alt=\"Render Logo\" /> **[Render](https://render.com/docs/mcp-server)** - The official Render MCP server: spin up new services, run queries against your databases, and debug rapidly with direct access to service metrics and logs.\n- <img height=\"12\" width=\"12\" src=\"https://reportportal.io/favicon.ico\" alt=\"ReportPortal Logo\" /> **[ReportPortal](https://github.com/reportportal/reportportal-mcp-server)** - explore and analyze automated test results from [ReportPortal](https://reportportal.io) using your favourite LLM.\n- <img height=\"12\" width=\"12\" src=\"http://nonica.io/Nonica-logo.ico\" alt=\"Nonica Logo\" /> **[Revit](https://github.com/NonicaTeam/AI-Connector-for-Revit)** - Connect and interact with your Revit models live.\n- <img height=\"12\" width=\"12\" src=\"https://ui.rilldata.com/favicon.png\" alt=\"Rill Data Logo\" /> **[Rill Data](https://docs.rilldata.com/explore/mcp)** - Interact with Rill Data to query and analyze your data.\n- <img height=\"12\" width=\"12\" src=\"https://riza.io/favicon.ico\" alt=\"Riza logo\" /> **[Riza](https://github.com/riza-io/riza-mcp)** - Arbitrary code execution and tool-use platform for LLMs by [Riza](https://riza.io)\n- <img height=\"12\" width=\"12\" src=\"https://cdn.foundation.roblox.com/current/RobloxStudio.ico\" alt=\"Roblox Studio\" /> **[Roblox Studio](https://github.com/Roblox/studio-rust-mcp-server)** - Roblox Studio MCP Server, create and manipulate scenes, scripts in Roblox Studio\n- <img src=\"https://hyper3d.ai/favicon.ico\" alt=\"Rodin\" width=\"12\" height=\"12\"> **[Rodin](https://github.com/DeemosTech/rodin-api-mcp)** - Generate 3D Models with [Hyper3D 
Rodin](https://hyper3d.ai)\n- <img height=\"12\" width=\"12\" src=\"https://cdn.prod.website-files.com/66b7de6a233c04f4dac200a6/66bed52680d689629483c18b_faviconV2%20(2).png\" alt=\"Root Signals Logo\" /> **[Root Signals](https://github.com/root-signals/root-signals-mcp)** - Improve and quality control your outputs with evaluations using LLM-as-Judge\n- **[Roundtable](https://github.com/askbudi/roundtable)** - Unified integration layer that bridges multiple AI coding assistants (Codex, Claude Code, Cursor, Gemini) through zero-configuration auto-discovery and enterprise-ready architecture.\n- **[Routine](https://github.com/routineco/mcp-server)** - MCP server to interact with [Routine](https://routine.co/): calendars, tasks, notes, etc.\n- <img height=\"12\" width=\"12\" src=\"https://platform.composio.dev/favicon.ico\" alt=\"Composio Logo\"> **[Rube](https://github.com/ComposioHQ/Rube)** - Rube is a Model Context Protocol (MCP) server that connects your AI tools to 500+ apps like Gmail, Slack, GitHub, and Notion. 
Simply install it in your AI client, authenticate once with your apps, and start asking your AI to perform real actions like \"Send an email\" or \"Create a task.\"\n- <img height=\"12\" width=\"12\" src=\"https://raw.githubusercontent.com/safedep/.github/refs/heads/main/assets/logo/1.png\" alt=\"SafeDep Logo\" /> **[SafeDep](https://github.com/safedep/vet/blob/main/docs/mcp.md)** - SafeDep `vet-mcp` helps in vetting open source packages for security risks—such as vulnerabilities and malicious code—before they're used in your project, especially with AI-generated code suggestions.\n- <img height=\"12\" width=\"12\" src=\"https://waf-ce.chaitin.cn/favicon.ico\" alt=\"SafeLine Logo\" /> **[SafeLine](https://github.com/chaitin/SafeLine/tree/main/mcp_server)** - [SafeLine](https://safepoint.cloud/landing/safeline) is a self-hosted WAF (Web Application Firewall) to protect your web apps from attacks and exploits.\n- <img height=\"12\" width=\"12\" src=\"https://scrapi.tech/favicon.ico\" alt=\"ScrAPI Logo\" /> **[ScrAPI](https://github.com/DevEnterpriseSoftware/scrapi-mcp)** - Web scraping using [ScrAPI](https://scrapi.tech). 
Extract website content that is difficult to access because of bot detection, captchas or even geolocation restrictions.\n- <img height=\"12\" width=\"12\" src=\"https://upnorthmedia.co/favicon.ico\" alt=\"Up North Media Logo\" /> **[ScreenshotMCP](https://github.com/upnorthmedia/ScreenshotMCP/)** - A Model Context Protocol MCP server for capturing website screenshots with full page, element, and device size features.\n- <img height=\"12\" width=\"12\" src=\"https://screenshotone.com/favicon.ico\" alt=\"ScreenshotOne Logo\" /> **[ScreenshotOne](https://github.com/screenshotone/mcp/)** - Render website screenshots with [ScreenshotOne](https://screenshotone.com/)\n- <img height=\"12\" width=\"12\" src=\"https://pics.fatwang2.com/56912e614b35093426c515860f9f2234.svg\" alt=\"Search1API Logo\" /> **[Search1API](https://github.com/fatwang2/search1api-mcp)** - One API for Search, Crawling, and Sitemaps\n- <img height=\"12\" width=\"12\" src=\"https://www.searchunify.com/favicon.ico\" alt=\"SearchUnify Logo\" /> **[SearchUnify](https://github.com/searchunify/su-mcp/)** - SearchUnify MCP Server (su-mcp) enables seamless integration of SearchUnify with Claude Desktop\n- <img height=\"12\" width=\"12\" src=\"https://secureframe.com/favicon.ico\" alt=\"Secureframe Logo\" /> **[Secureframe](https://github.com/secureframe/secureframe-mcp-server)** - Query security controls, monitor compliance tests, and access audit data across SOC 2, ISO 27001, CMMC, FedRAMP, and other frameworks from [Secureframe](https://secureframe.com).\n- <img height=\"12\" width=\"12\" src=\"https://semgrep.dev/favicon.ico\" alt=\"Semgrep Logo\" /> **[Semgrep](https://github.com/semgrep/semgrep/blob/develop/cli/src/semgrep/mcp/README.md)** - Enable AI agents to secure code with [Semgrep](https://semgrep.dev/).\n- <img height=\"12\" width=\"12\" src=\"https://semilattice.ai/favicon.png\" alt=\"Semilattice icon\" /> **[Semilattice](https://github.com/semilattice-research/mcp)** - Test content, personalise 
features, and A/B test decisions with accurate audience prediction.\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/187640573?s=48&v=4\" alt=\"Sequa Logo\" /> **[Sequa.AI](https://github.com/sequa-ai/sequa-mcp)** - Stop stitching context for Copilot and Cursor. With [Sequa MCP](https://github.com/sequa-ai/sequa-mcp), your AI tools know all your codebases and docs out of the box.\n- <img height=\"12\" width=\"12\" src=\"https://cdn.prod.website-files.com/6372338e5477e047032b37a5/64f85e6388a2a5c8c9525b4d_favLogo.png\" alt=\"Shortcut Logo\" /> **[Shortcut](https://github.com/useshortcut/mcp-server-shortcut)** - Access and implement all of your projects and tasks (Stories) from [Shortcut](https://shortcut.com/).\n- <img height=\"12\" width=\"12\" src=\"https://simplifier.io/favicon.ico\" alt=\"Simplifier Logo\" /> **[Simplifier](https://github.com/simplifier-ag/simplifier-mcp)** - Manage connectors, business objects and more in your [Simplifier](https://simplifier.io/) low code platform.\n- <img height=\"12\" width=\"12\" src=\"https://www.singlestore.com/favicon-32x32.png?v=277b9cbbe31e8bc416504cf3b902d430\"/> **[SingleStore](https://github.com/singlestore-labs/mcp-server-singlestore)** - Interact with the SingleStore database platform\n- <img height=\"12\" width=\"12\" src=\"https://smartbear.com/smartbear/assets/img/favicon.png\" alt=\"SmartBear Logo\" /> **[SmartBear](https://github.com/SmartBear/smartbear-mcp)** - Provides access to multiple capabilities across SmartBear's API Hub, Test Hub, and Insight Hub, all through [dedicated tools and resources](https://developer.smartbear.com/smartbear-mcp/docs/mcp-server).\n- <img src=\"https://smooth-operator.online/logo48.png\" alt=\"Smooth Operator\" width=\"12\" height=\"12\"> **[Smooth Operator](https://smooth-operator.online/agent-tools-api-docs/toolserverdocs)** - Tools to automate Windows via AI Vision, Mouse, Keyboard, Automation Trees, Webbrowser\n- <img height=\"12\" width=\"12\" 
src=\"https://app.snyk.io/bundle/favicon-faj49uD9.png\" alt=\"Snyk Logo\" /> **[Snyk](https://github.com/snyk/snyk-ls/blob/main/mcp_extension/README.md)** - Enhance security posture by embedding [Snyk](https://snyk.io/) vulnerability scanning directly into agentic workflows.\n- <img height=\"12\" width=\"12\" src=\"https://www.sonarsource.com/favicon.ico\" alt=\"SonarQube Logo\" /> **[SonarQube](https://github.com/SonarSource/sonarqube-mcp-server)** - Enables seamless integration with [SonarQube](https://www.sonarsource.com/) Server or Cloud and allows for code snippet analysis within the agent context.\n- <img src=\"https://sophtron.com/favicon.ico\" alt=\"Sophtron\" width=\"12\" height=\"12\"> **[Sophtron](https://github.com/sophtron/Sophtron-Integration/tree/main/modelcontextprotocol)** - Connect to your bank, credit card, utilities accounts to retrieve account balances and transactions with [Sophtron Bank Integration](https://sophtron.com).\n- <img height=\"12\" width=\"12\" src=\"https://learn.microsoft.com/favicon.ico\" alt=\"Microsoft Learn Logo\" /> **[SQL Server](https://github.com/Azure-Samples/SQL-AI-samples/tree/main/MssqlMcp)** - Official Microsoft SQL Server MCP<sup>[1](https://devblogs.microsoft.com/azure-sql/introducing-mssql-mcp-server/)</sup>\n- <img height=\"12\" width=\"12\" src=\"https://www.stackhawk.com/wp-content/uploads/2025/03/icon-512x512-2-150x150.png\" alt=\"StackHawk Logo\" /> **[StackHawk](https://github.com/stackhawk/stackhawk-mcp)** - Use [StackHawk](https://www.stackhawk.com/) to test for and FIX security problems in your code or vibe coded app.\n- <img height=\"12\" width=\"12\" src=\"https://stackoverflow.com/Content/Sites/stackoverflow/Img/apple-touch-icon@2.png\" alt=\"StackOverflow Logo\" /> **[Stack Overflow](https://api.stackexchange.com/docs/mcp-server)** - Access Stack Overflow's trusted and verified technical questions and answers.\n- <img height=\"12\" width=\"12\" 
src=\"https://www.stardog.com/img/favicon.ico?_cchid=1cc28b39bd2e8a628edeed79ccd4f49c\" alt=\"Stardog Logo\" /> **[Stardog](https://github.com/stardog-union/stardog-cloud-mcp)** - Provide trusted, contextual answers to both humans and agents using your enterprise knowledge graph with [Stardog](https://www.stardog.com)'s Semantic AI Platform.\n- <img height=\"12\" width=\"12\" src=\"https://www.starrocks.io/favicon.ico\" alt=\"StarRocks Logo\" /> **[StarRocks](https://github.com/StarRocks/mcp-server-starrocks)** - Interact with [StarRocks](https://www.starrocks.io/)\n- <img height=\"12\" width=\"12\" src=\"https://downloads.steadybit.com/logomark.svg\" alt=\"Steadybit Logo\" /> **[Steadybit](https://github.com/steadybit/mcp)** - Interact with [Steadybit](https://www.steadybit.com/)\n- <img height=\"12\" width=\"12\" src=\"https://steuerboard.net/favicon.ico\" alt=\"Steuerboard Logo\" /> **[Steuerboard](https://github.com/steuerboard/steuerboard-mcp-typescript)** - Interact with the accounting data in your business using our official MCP server\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/22632046?s=200&v=4\" alt=\"Storybook Logo\" /> **[Storybook](https://github.com/storybookjs/addon-mcp)** - Interact with [Storybook](https://storybook.js.org/) to automate UI component testing and documentation\n- <img height=\"12\" width=\"12\" src=\"https://raw.githubusercontent.com/klavis-ai/klavis/main/static/klavis-ai.png\" alt=\"Strata Logo\" /> **[Strata](https://www.klavis.ai/)** - One MCP server that guides your AI agents through thousands of tools in multiple apps progressively. 
It eliminates context overload and ensures accurate tool selection, enabling agents to handle complex, multi-app workflows with ease.\n- <img height=\"12\" width=\"12\" src=\"https://stripe.com/favicon.ico\" alt=\"Stripe Logo\" /> **[Stripe](https://github.com/stripe/agent-toolkit)** - Interact with Stripe API\n- <img height=\"12\" width=\"12\" src=\"https://www.success.co/favicon.ico\" alt=\"Success.co Logo\" /> **[Success.co](https://www.success.co/docs/guides/ai-mcp-connector)** - Interact with your Success.co account - enhance your EOS® journey and get insights on your teams and business.\n- <img height=\"12\" width=\"12\" src=\"https://github.com/cdnsteve.png\" alt=\"Sugar Logo\" /> **[Sugar](https://github.com/cdnsteve/sugar)** - Autonomous AI development platform for Claude Code with task management, specialized agents, and workflow automation. Full MCP server bridges Claude with Python CLI for rich task context and autonomous execution.\n- <img height=\"12\" width=\"12\" src=\"https://sunra.ai/favicon.ico\" alt=\"Sunra AI Logo\" /> **[Sunra AI](https://github.com/sunra-ai/sunra-clients/tree/main/mcp-server)** - Search for and run AI models on [Sunra.ai](https://sunra.ai). 
Discover models, create video, image, and 3D model content, track their status, and manage the generated media.\n- <img height=\"12\" width=\"12\" src=\"https://supabase.com/favicon/favicon.ico\" alt=\"Supabase Logo\" /> **[Supabase](https://github.com/supabase-community/supabase-mcp)** - Interact with Supabase: Create tables, query data, deploy edge functions, and more.\n- <img height=\"12\" width=\"12\" src=\"https://supadata.ai/favicon.ico\" alt=\"Supadata Logo\" /> **[Supadata](https://github.com/supadata-ai/mcp)** - Official MCP server for [Supadata](https://supadata.ai) - YouTube, TikTok, X and Web data for makers.\n- <img height=\"12\" width=\"12\" src=\"https://d12w4pyrrczi5e.cloudfront.net/archive/50eb154ab859c63a8f1c850f9fe094e25d35e929/images/favicon.ico\" alt=\"Tako Logo\" /> **[Tako](https://github.com/TakoData/tako-mcp)** - Use natural language to search [Tako](https://trytako.com) for real-time financial, sports, weather, and public data with visualization\n- <img height=\"12\" width=\"12\" src=\"https://tavily.com/favicon.ico\" alt=\"Tavily Logo\" /> **[Tavily](https://github.com/tavily-ai/tavily-mcp)** - Search engine for AI agents (search + extract) powered by [Tavily](https://tavily.com/)\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/10522416?s=200&v=4\" alt=\"Telnyx Logo\" /> **[Telnyx](https://github.com/team-telnyx/telnyx-mcp-server)** - Official MCP server for building AI-powered communication apps. Create voice assistants, send SMS campaigns, manage phone numbers, and integrate real-time messaging with enterprise-grade reliability. 
Includes remote [streamable-http](https://api.telnyx.com/v2/mcp) and [sse](https://api.telnyx.com/mcp/sse) servers.\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/91520705?s=48&v=4\" alt=\"Tencent RTC Logo\" /> **[Tencent RTC](https://github.com/Tencent-RTC/mcp)** - The MCP Server enables AI IDEs to more effectively understand and use [Tencent's Real-Time Communication](https://trtc.io/) SDKs and APIs, which significantly streamlines the process for developers to build audio/video call applications.\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/1615979?s=200&v=4\" alt=\"Teradata Logo\" /> **[Teradata](https://github.com/Teradata/teradata-mcp-server)** - This MCP Server supports tools and prompts for multi-task data analytics on a [Teradata](https://teradata.com) platform.\n- <img height=\"12\" width=\"12\" src=\"https://raw.githubusercontent.com/hashicorp/terraform-mcp-server/main/public/images/Terraform-LogoMark_onDark.svg\" alt=\"Terraform Logo\" /> **[Terraform](https://github.com/hashicorp/terraform-mcp-server)** - Seamlessly integrate with Terraform ecosystem, enabling advanced automation and interaction capabilities for Infrastructure as Code (IaC) development powered by [Terraform](https://www.hashicorp.com/en/products/terraform)\n- <img height=\"12\" width=\"12\" src=\"https://textarttools.com/textarttoolslogo.png\" alt=\"TextArtTools Logo\" /> **[TextArtTools](https://github.com/humanjesse/textarttools-mcp)** - Transform text with 23 Unicode styles and create stylized banners with 322+ figlet fonts.\n- <img height=\"12\" width=\"12\" src=\"https://www.textin.com/favicon.png\" alt=\"TextIn Logo\" /> **[TextIn](https://github.com/intsig-textin/textin-mcp)** - An MCP server for the [TextIn](https://www.textin.com/?from=github_mcp) API, is a tool for extracting text and performing OCR on documents, it also supports converting documents into Markdown\n- <img height=\"12\" width=\"12\" 
src=\"https://avatars.githubusercontent.com/u/106156665?s=200\" alt=\"Thena Logo\" /> **[Thena](https://mcp.thena.ai)** - Thena's MCP server for enabling users and AI agents to interact with Thena's services and manage customers across different channels such as Slack, Email, Web, Discord etc.\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/24291394?v=4\" alt=\"ThingsBoard\" /> **[ThingsBoard](https://github.com/thingsboard/thingsboard-mcp)** - The ThingsBoard MCP Server provides a natural language interface for LLMs and AI agents to interact with your ThingsBoard IoT platform.\n- <img height=\"12\" width=\"12\" src=\"https://www.lg.com/favicon.ico\" alt=\"ThinQ Logo\" /> **[ThinQ Connect](https://github.com/thinq-connect/thinqconnect-mcp)** - Interact with LG ThinQ smart home devices and appliances through the ThinQ Connect MCP server.\n- <img height=\"12\" width=\"12\" src=\"https://thirdweb.com/favicon.ico\" alt=\"Thirdweb Logo\" /> **[Thirdweb](https://github.com/thirdweb-dev/ai/tree/main/python/thirdweb-mcp)** - Read/write to over 2k blockchains, enabling data querying, contract analysis/deployment, and transaction execution, powered by [Thirdweb](https://thirdweb.com/)\n- <img height=\"12\" width=\"12\" src=\"https://www.thoughtspot.com/favicon-16x16.png\" alt=\"ThoughtSpot Logo\" /> **[ThoughtSpot](https://github.com/thoughtspot/mcp-server)** - AI is the new BI. A dedicated data analyst for everyone on your team. 
Bring [ThoughtSpot](https://thoughtspot.com) powers into Claude or any MCP host.\n- <img height=\"12\" width=\"12\" src=\"https://tianji.msgbyte.com/img/dark-brand.svg\" alt=\"Tianji Logo\" /> **[Tianji](https://github.com/msgbyte/tianji/tree/master/apps/mcp-server)** - Interact with Tianji platform, whether self-hosted or cloud, powered by [Tianji](https://tianji.msgbyte.com/).\n- <img height=\"12\" width=\"12\" src=\"https://www.pingcap.com/favicon.ico\" alt=\"TiDB Logo\" /> **[TiDB](https://github.com/pingcap/pytidb)** - MCP Server to interact with TiDB database platform.\n- <img height=\"12\" width=\"12\" src=\"https://www.tinybird.co/favicon.ico\" alt=\"Tinybird Logo\" /> **[Tinybird](https://github.com/tinybirdco/mcp-tinybird)** - Interact with Tinybird serverless ClickHouse platform\n- <img height=\"12\" width=\"12\" src=\"https://b2729162.smushcdn.com/2729162/wp-content/uploads/2023/10/cropped-Favicon-1-192x192.png?lossy=1&strip=1&webp=1\" alt=\"Tldv Logo\" /> **[Tldv](https://gitlab.com/tldv/tldv-mcp-server)** - Connect your AI agents to Google-Meet, Zoom & Microsoft Teams through [tl;dv](https://tldv.io)\n- <img height=\"12\" width=\"12\" src=\"https://www.todoist.com/static/favicon-32x32.png\" alt=\"Todoist Logo\" /> **[Todoist](https://github.com/doist/todoist-ai)** - Search, add, and update [Todoist](https://todoist.com) tasks, projects, sections, comments, and more.\n- <img height=\"12\" width=\"12\" src=\"https://cdn.tokenmetrics.com/logo.svg\" alt=\"Token Metrics Logo\" /> **[Token Metrics](https://github.com/token-metrics/mcp)** - [Token Metrics](https://www.tokenmetrics.com/) integration for fetching real-time crypto market data, trading signals, price predictions, and advanced analytics.\n- <img height=\"12\" width=\"12\" src=\"https://di8m9w6rqrh5d.cloudfront.net/2G3TRwfv1w3GTLfmT7Dmco1VddoFTI5P/1920_6b7e7ec2-d897-4cd7-94f3-46a8301212c3.png\" alt=\"TomTom Logo\" /> **[TomTom-MCP](https://github.com/tomtom-international/tomtom-mcp)** - 
The [TomTom](https://www.tomtom.com/) MCP Server simplifies geospatial development by providing seamless access to TomTom's location services, including search, routing, traffic and static maps data.\n- <img height=\"12\" width=\"12\" src=\"https://images.tradeit.app/trade_agent/logo.svg\" alt=\"Trade It Logo\" /> **[Trade It](https://github.com/trade-it-inc/trade-it-mcp)** - Execute stock, crypto, and options trades on your brokerage via [Trade It](https://tradeit.app). Supports Robinhood, ETrade, Charles Schwab, Webull, Coinbase, and Kraken.\n- <img height=\"18\" width=\"18\" src=\"https://github.com/twelvedata/mcp/raw/develop/favicon.ico\" alt=\"Twelvedata Logo\" /> **[Twelve Data](https://github.com/twelvedata/mcp)** — Integrate your AI agents with real-time and historical financial market data through our official [Twelve Data](https://twelvedata.com) MCP server.\n- <img height=\"12\" width=\"12\" src=\"https://www.twilio.com/content/dam/twilio-com/core-assets/social/favicon-16x16.png\" alt=\"Twilio Logo\" /> **[Twilio](https://github.com/twilio-labs/mcp)** - Interact with [Twilio](https://www.twilio.com/en-us) APIs to send SMS messages, manage phone numbers, configure your account, and more.\n- <img height=\"12\" width=\"12\" src=\"https://miniprogram.tcsas-superapp.com/icon_512.png\" alt=\"TCSAS Logo\" /> **[TCSAS](https://github.com/TCMPP-Team/tcsas-devtools-mcp-server)** - Built on the Tencent Mini Program technical framework and fully following the development, powered by [Tencent Cloud Super App as a Service](https://www.tencentcloud.com/products/tcsas?lang=en&pg=).\n- <img height=\"12\" width=\"12\" src=\"https://uberall.com/media/favicon.svg\" alt=\"Uberall Logo\" /> **[Uberall](https://github.com/uberall/uberall-mcp-server)** – Manage multi-location presence, including listings, reviews, and social posting, via [uberall](https://uberall.com).\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/91906527\" alt=\"Unblocked 
Logo\" /> **[Unblocked](https://docs.getunblocked.com/unblocked-mcp)** Help your AI-powered IDEs generate faster, more accurate code by giving them access to context from Slack, Confluence, Google Docs, JIRA, and more with [Unblocked](https://getunblocked.com).\n- <img height=\"12\" width=\"12\" src=\"https://unifai.network/favicon.ico\" alt=\"UnifAI Logo\" /> **[UnifAI](https://github.com/unifai-network/unifai-mcp-server)** - Dynamically search and call tools using [UnifAI Network](https://unifai.network)\n- <img height=\"12\" width=\"12\" src=\"https://framerusercontent.com/images/plcQevjrOYnyriuGw90NfQBPoQ.jpg\" alt=\"Unstructured Logo\" /> **[Unstructured](https://github.com/Unstructured-IO/UNS-MCP)** - Set up and interact with your unstructured data processing workflows in [Unstructured Platform](https://unstructured.io)\n- <img height=\"12\" width=\"12\" src=\"https://uno-assets.platform.uno/logos/PNG/Uno_Platform_Symbol_RW.png\" alt=\"Uno Platform Logo\" /> **[Uno Platform](https://platform.uno/)** - Connects agents and developers to [Uno Platform's](https://aka.platform.uno/mcp) knowledge base - docs, APIs, and best practices allowing for building cross-platform .NET applications.\n- <img height=\"12\" width=\"12\" src=\"https://upstash.com/icons/favicon-32x32.png\" alt=\"Upstash Logo\" /> **[Upstash](https://github.com/upstash/mcp-server)** - Manage Redis databases and run Redis commands on [Upstash](https://upstash.com/) with natural language.\n- <img height=\"12\" width=\"12\" src=\"https://raw.githubusercontent.com/e2e-test-quest/uuv/refs/heads/main/uuv.ico\" alt=\"UUV Logo\" /> **[UUV](https://github.com/e2e-test-quest/uuv/tree/main/packages/mcp-server)** - Generate human readable end to end tests with [UUV](https://e2e-test-quest.github.io/uuv/).\n- <img height=\"12\" width=\"12\" src=\"http://vaadin.com/favicon.ico\" alt=\"Vaadin Logo\" /> **[Vaadin](https://github.com/marcushellberg/vaadin-documentation-services)** - Search Vaadin documentation, get 
the full documentation, and get version information. Designed for AI agents.\n- <img src=\"https://www.vantage.sh/favicon.ico\" alt=\"Vantage\" width=\"12\" height=\"12\"> **[Vantage](https://github.com/vantage-sh/vantage-mcp-server)** - Interact with your organization's cloud cost spend.\n- <img height=\"12\" width=\"12\" src=\"https://mcp.variflight.com/favicon.ico\" alt=\"VariFlight Logo\" /> **[VariFlight](https://github.com/variflight/variflight-mcp)** - VariFlight's official MCP server provides tools to query flight information, weather data, comfort metrics, the lowest available fares, and other civil aviation-related data.\n- <img height=\"12\" width=\"12\" src=\"https://docs.octagonagents.com/logo.svg\" alt=\"Octagon Logo\" /> **[VCAgents](https://github.com/OctagonAI/octagon-vc-agents)** - Interact with investor agents—think Wilson or Thiel—continuously updated with market intel.\n- **[Vectorize](https://github.com/vectorize-io/vectorize-mcp-server/)** - [Vectorize](https://vectorize.io) MCP server for advanced retrieval, Private Deep Research, Anything-to-Markdown file extraction and text chunking.\n- <img height=\"12\" width=\"12\" src=\"https://static.verbwire.com/favicon-16x16.png\" alt=\"Verbwire Logo\" /> **[Verbwire](https://github.com/verbwire/verbwire-mcp-server)** - Deploy smart contracts, mint NFTs, manage IPFS storage, and more through the Verbwire API\n- <img height=\"12\" width=\"12\" src=\"http://vercel.com/favicon.ico\" alt=\"Vercel Logo\" /> **[Vercel](https://vercel.com/docs/mcp/vercel-mcp)** - Access logs, search docs, and manage projects and deployments.\n- <img height=\"12\" width=\"12\" src=\"https://verodat.io/assets/favicon-16x16.png\" alt=\"Verodat Logo\" /> **[Verodat](https://github.com/Verodat/verodat-mcp-server)** - Interact with Verodat AI Ready Data platform\n- <img height=\"12\" width=\"12\" src=\"https://www.veyrax.com/favicon.ico\" alt=\"VeyraX Logo\" /> **[VeyraX](https://github.com/VeyraX/veyrax-mcp)** - Single tool to 
control all 100+ API integrations, and UI components\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/174736222?s=200&v=4\" alt=\"VictoriaLogs Logo\" /> **[VictoriaLogs](https://github.com/VictoriaMetrics-Community/mcp-victorialogs)** - Integration with [VictoriaLogs APIs](https://docs.victoriametrics.com/victorialogs/querying/#http-api) and [documentation](https://docs.victoriametrics.com/victorialogs/) for working with logs and debugging tasks related to your VictoriaLogs instances.\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/174736222?s=200&v=4\" alt=\"VictoriaMetrics Logo\" /> **[VictoriaMetrics](https://github.com/VictoriaMetrics-Community/mcp-victoriametrics)** - Comprehensive integration with [VictoriaMetrics APIs](https://docs.victoriametrics.com/victoriametrics/url-examples/) and [documentation](https://docs.victoriametrics.com/) for monitoring, observability, and debugging tasks related to your VictoriaMetrics instances.\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/174736222?s=200&v=4\" alt=\"VictoriaTraces Logo\" /> **[VictoriaTraces](https://github.com/VictoriaMetrics-Community/mcp-victoriatraces)** - Integration with [VictoriaTraces APIs](https://docs.victoriametrics.com/victoriatraces/querying/#http-api) and [documentation](https://docs.victoriametrics.com/victoriatraces/) for working with distributed tracing and debugging tasks related to your VictoriaTraces instances.\n- <img height=\"12\" width=\"12\" src=\"https://framerusercontent.com/images/ijlYG00LOcMD6zR1XLMxHbAwZkM.png\" alt=\"VideoDB Director\" /> **[VideoDB Director](https://github.com/video-db/agent-toolkit/tree/main/modelcontextprotocol)** - Create AI-powered video workflows including automatic editing, content moderation, voice cloning, highlight generation, and searchable video moments—all accessible via simple APIs and intuitive chat-based interfaces.\n- <img height=\"12\" 
width=\"12\" src=\"https://landing.ai/wp-content/uploads/2024/04/cropped-favicon-192x192.png\" alt=\"LandingAI VisionAgent\" /> **[VisionAgent MCP](https://github.com/landing-ai/vision-agent-mcp)** - A simple MCP server that enables your LLM to better reason over images, video and documents.\n- <img height=\"12\" width=\"12\" src=\"https://raw.githubusercontent.com/mckinsey/vizro/main/vizro-core/docs/assets/images/favicon.png\" alt=\"Vizro Logo\" /> **[Vizro](https://github.com/mckinsey/vizro/tree/main/vizro-mcp)** - Tools and templates to create validated and maintainable data charts and dashboards\n- <img height=\"12\" width=\"12\" src=\"https://wavespeed.ai/logo.webp\" alt=\"WaveSpeed Logo\" /> **[WaveSpeed](https://github.com/WaveSpeedAI/mcp-server)** - WaveSpeed MCP server providing AI agents with image and video generation capabilities.\n- <img height=\"12\" width=\"12\" src=\"https://waystation.ai/images/logo.svg\" alt=\"WayStation Logo\" /> **[WayStation](https://github.com/waystation-ai/mcp)** - Universal MCP server to connect to popular productivity tools such as Notion, Monday, AirTable, and many more\n- <img height=\"12\" width=\"12\" src=\"https://www.webflow.com/favicon.ico\" alt=\"Webflow Logo\"> **[Webflow](https://github.com/webflow/mcp-server)** - Interact with Webflow sites, pages, and collections\n- <img height=\"12\" width=\"12\" src=\"https://webscraping.ai/favicon.ico\" alt=\"WebScraping.AI Logo\" /> **[WebScraping.AI](https://github.com/webscraping-ai/webscraping-ai-mcp-server)** - Interact with **[WebScraping.AI](https://WebScraping.AI)** for web data extraction and scraping\n- <img height=\"12\" width=\"12\" src=\"https://static.whatsapp.net/rsrc.php/v3/yz/r/ujTY9i_Jhs1.png\" alt=\"WhatsApp Business Logo\" /> **[WhatsApp Business](https://medium.com/@wassenger/introducing-whatsapp-mcp-ai-connector-3d393b52d1b0)** - WhatsApp Business MCP connector enabling AI agents to send messages, manage conversations, access templates, and integrate 
with WhatsApp Business API for automated customer communication.\n- <img height=\"12\" width=\"12\" src=\"https://winston-app-production-public.s3.us-east-1.amazonaws.com/winston-ai-favicon-light.svg\" alt=\"Winston.AI Logo\" /> **[Winston AI](https://github.com/gowinston-ai/winston-ai-mcp-server)** - AI detector MCP server with industry leading accuracy rates in detecting use of AI in text and images. The [Winston AI](https://gowinston.ai) MCP server also offers a robust plagiarism checker to help maintain integrity.\n- <img height=\"12\" width=\"12\" src=\"https://woocommerce.com/wp-content/uploads/2024/12/cropped-logo-w-favicon.png\" alt=\"WooCommerce.com Logo\" /> **[WooCommerce.com](https://developer.woocommerce.com/docs/features/mcp/)** - Manage your WooCommerce.com store, products, and orders with our MCP integration.\n- <img height=\"12\" width=\"12\" src=\"https://developer.wordpress.com/wp-content/uploads/2025/03/cropped-favicon-64x64-from-figma.png\" alt=\"WordPress.com Logo\" /> **[WordPress.com](https://developer.wordpress.com/docs/mcp/)** - Connect your AI assistant to WordPress.com, giving you direct visibility into your site's content, analytics, and settings.\n- <img height=\"12\" width=\"12\" src=\"https://www.xero.com/favicon.ico\" alt=\"Xero Logo\" /> **[Xero](https://github.com/XeroAPI/xero-mcp-server)** - Interact with the accounting data in your business using our official MCP server\n- <img height=\"12\" width=\"12\" src=\"https://storage.yandexcloud.net/ydb-www-prod-site-assets/favicon-202305/favicon.ico\" alt=\"YDB Logo\" /> **[YDB](https://github.com/ydb-platform/ydb-mcp)** - Query [YDB](https://ydb.tech/) databases\n- <img height=\"12\" width=\"12\" src=\"https://fe-resource.yeelight.com/logo-black.jpeg\" alt=\"Yeelight Logo\" /> **[Yeelight MCP Server](https://github.com/Yeelight/yeelight-iot-mcp)** - The official [Yeelight MCP Server](https://github.com/Yeelight/yeelight-iot-mcp) enables users to control and query their 
[Yeelight](https://en.yeelight.com/) smart devices using natural language, offering a seamless and efficient human-AI interaction experience.\n- <img height=\"12\" width=\"12\" src=\"https://cdn.prod.website-files.com/632cd328ed2b485519c3f689/6334977a5d1a542102d4b9b5_favicon-32x32.png\" alt=\"YepCode Logo\" /> **[YepCode](https://github.com/yepcode/mcp-server-js)** - Run code in a secure, scalable sandbox environment with full support for dependencies, secrets, logs, and access to APIs or databases. Powered by [YepCode](https://yepcode.io)\n- <img height=\"12\" width=\"12\" src=\"https://www.yugabyte.com/favicon-16x16.png\" alt=\"YugabyteDB Logo\" /> **[YugabyteDB](https://github.com/yugabyte/yugabytedb-mcp-server)** -  MCP Server to interact with your [YugabyteDB](https://www.yugabyte.com/) database\n- <img height=\"12\" width=\"12\" src=\"https://avatars.githubusercontent.com/u/14069894\" alt=\"Yunxin Logo\" /> **[Yunxin](https://github.com/netease-im/yunxin-mcp-server)** - An MCP server that connects to Yunxin's IM/RTC/DATA Open-API\n- <img height=\"12\" width=\"12\" src=\"https://cdn.zapier.com/zapier/images/favicon.ico\" alt=\"Zapier Logo\" /> **[Zapier](https://zapier.com/mcp)** - Connect your AI Agents to 8,000 apps instantly.\n- <img height=\"12\" width=\"12\" src=\"https://www.zenable.app/zenable_light.svg\" alt=\"Zenable Logo\" /> **[Zenable](https://docs.zenable.io/integrations/mcp/getting-started)** - Clean up sloppy AI code and prevent vulnerabilities\n- **[ZenML](https://github.com/zenml-io/mcp-zenml)** - Interact with your MLOps and LLMOps pipelines through your [ZenML](https://www.zenml.io) MCP server\n- **[ZettelkastenSpace](https://github.com/joshylchen/zettelkasten_space)** - Built on the proven [Zettelkasten](https://www.zettelkasten.space/) method, enhanced with Claude Desktop integration via Model Context Protocol \n- <img height=\"12\" width=\"12\" src=\"https://www.zine.ai/images/zine-logo.png\" alt=\"Zine Logo\" /> 
**[Zine](https://www.zine.ai)** - Your memory, everywhere AI goes. Think iPhoto for your knowledge - upload and curate. Like ChatGPT but portable - context that travels with you.\n- <img height=\"12\" width=\"12\" src=\"https://zizai.work/images/logo.jpg\" alt=\"ZIZAI Logo\" /> **[ZIZAI Recruitment](https://github.com/zaiwork/mcp)** - Interact with the next-generation intelligent recruitment platform for employees and employers, powered by [ZIZAI Recruitment](https://zizai.work).\n\n### 🌎 Community Servers\n\nA growing set of community-developed and maintained servers demonstrates various applications of MCP across different domains.\n\n> [!NOTE]\n> Community servers are **untested** and should be used at **your own risk**. They are not affiliated with or endorsed by Anthropic.\n\n- **[1mcpserver](https://github.com/particlefuture/1mcpserver)** - MCP of MCPs. Automatically discover, configure, and add MCP servers on your local machine.\n- **[1Panel](https://github.com/1Panel-dev/mcp-1panel)** - MCP server implementation that provides 1Panel interaction.\n- **[A2A](https://github.com/GongRzhe/A2A-MCP-Server)** - An MCP server that bridges the Model Context Protocol (MCP) with the Agent-to-Agent (A2A) protocol, enabling MCP-compatible AI assistants (like Claude) to seamlessly interact with A2A agents.\n- **[Ableton Live](https://github.com/Simon-Kansara/ableton-live-mcp-server)** - an MCP server to control Ableton Live.\n- **[Ableton Live](https://github.com/ahujasid/ableton-mcp)** (by ahujasid) - Ableton integration allowing prompt enabled music creation.\n- **[ActivityPub MCP](https://github.com/cameronrye/activitypub-mcp)** - A comprehensive MCP server that enables LLMs to explore and interact with the Fediverse through ActivityPub protocol, supporting actor discovery, timeline fetching, instance exploration, and WebFinger resolution across decentralized social networks.\n- **[Actor Critic Thinking](https://github.com/aquarius-wing/actor-critic-thinking-mcp)** - 
Actor-critic thinking for performance evaluation\n- **[Adobe Commerce](https://github.com/rafaelstz/adobe-commerce-dev-mcp)** — MCP to interact with Adobe Commerce GraphQL API, including orders, products, customers, etc.\n- **[ADR Analysis](https://github.com/tosin2013/mcp-adr-analysis-server)** - AI-powered Architectural Decision Records (ADR) analysis server that provides architectural insights, technology stack detection, security checks, and TDD workflow enhancement for software development projects.\n- **[Ads MCP](https://github.com/amekala/ads-mcp)** - Remote MCP server for cross-platform ad campaign creation (Google Ads Search & PMax, TikTok). OAuth 2.1 authentication with progress streaming support for long-running operations. [Website](https://www.adspirer.com/)\n- **[Agent Interviews](https://github.com/thinkchainai/agentinterviews_mcp)** - Conduct AI-powered qualitative research interviews and surveys at scale with [Agent Interviews](https://agentinterviews.com).\n- **[AgentBay](https://github.com/Michael98671/agentbay)** - An MCP server for providing serverless cloud infrastructure for AI agents.\n- **[Agentic Framework](https://github.com/Piotr1215/mcp-agentic-framework)** - Multi-agent collaboration framework enabling AI agents to register, discover each other, exchange asynchronous messages via HTTP transport, and work together on complex tasks with persistent message history.\n- **[AgentMode](https://www.agentmode.app)** - Connect to dozens of databases, data warehouses, Github & more, from a single MCP server.  
Run the Docker image locally, in the cloud, or on-premise.\n- **[AI Agent Marketplace Index](https://github.com/AI-Agent-Hub/ai-agent-marketplace-index-mcp)** - MCP server to search more than 5000+ AI agents and tools of various categories from [AI Agent Marketplace Index](http://www.deepnlp.org/store/ai-agent) and monitor traffic of AI Agents.\n- **[AI Endurance](https://github.com/ai-endurance/mcp)** - AI-powered training platform for runners, cyclists, and triathletes with over 20 tools for workout management, activity analysis, performance predictions, and recovery tracking.\n- **[AI Tasks](https://github.com/jbrinkman/valkey-ai-tasks)** - Let the AI manage complex plans with integrated task management and tracking tools. Supports STDIO, SSE and Streamable HTTP transports.\n- **[ai-Bible](https://github.com/AdbC99/ai-bible)** - Search the bible reliably and repeatably [ai-Bible Labs](https://ai-bible.com)\n- **[Airbnb](https://github.com/openbnb-org/mcp-server-airbnb)** - Provides tools to search Airbnb and get listing details.\n- **[Airflow](https://github.com/yangkyeongmo/mcp-server-apache-airflow)** - An MCP Server that connects to [Apache Airflow](https://airflow.apache.org/) using official python client.\n- **[Airtable](https://github.com/domdomegg/airtable-mcp-server)** - Read and write access to [Airtable](https://airtable.com/) databases, with schema inspection.\n- **[Airtable](https://github.com/felores/airtable-mcp)** - Airtable Model Context Protocol Server.\n- **[Algorand](https://github.com/GoPlausible/algorand-mcp)** - A comprehensive MCP server for tooling interactions (40+) and resource accessibility (60+) plus many useful prompts for interacting with the Algorand blockchain.\n- **[Amadeus](https://github.com/donghyun-chae/mcp-amadeus)** (by donghyun-chae) - An MCP server to access, explore, and interact with Amadeus Flight Offers Search API for retrieving detailed flight options, including airline, times, duration, and pricing data.\n- 
**[Amazon Ads](https://github.com/MarketplaceAdPros/amazon-ads-mcp-server)** - MCP Server that provides interaction capabilities with Amazon Advertising through [MarketplaceAdPros](https://marketplaceadpros.com)\n- **[AniList](https://github.com/yuna0x0/anilist-mcp)** (by yuna0x0) - An MCP server to interact with AniList API, allowing you to search for anime and manga, retrieve user data, and manage your watchlist.\n- **[Anki](https://github.com/scorzeth/anki-mcp-server)** - An MCP server for interacting with your [Anki](https://apps.ankiweb.net) decks and cards.\n- **[Anki](https://github.com/nietus/anki-mcp)** - MCP server to run locally with Anki and Ankiconnect. Supports creating, updating, searching and filtering cards and decks. Include mass update and other advanced tools.\n- **[AntV Chart](https://github.com/antvis/mcp-server-chart)** - A Model Context Protocol server for generating 15+ visual charts using [AntV](https://github.com/antvis).\n- **[Any Chat Completions](https://github.com/pyroprompts/any-chat-completions-mcp)** - Interact with any OpenAI SDK Compatible Chat Completions API like OpenAI, Perplexity, Groq, xAI and many more.\n- **[Apache Gravitino(incubating)](https://github.com/datastrato/mcp-server-gravitino)** - Allow LLMs to explore metadata of structured data and unstructured data with Gravitino, and perform data governance tasks including tagging/classification.\n- **[API Lab MCP](https://github.com/atototo/api-lab-mcp)** - Transform Claude into your AI-powered API testing laboratory. Test, debug, and document APIs through natural conversation with authentication support, response validation, and performance metrics.\n- **[APIWeaver](https://github.com/GongRzhe/APIWeaver)** - An MCP server that dynamically creates MCP servers from web API configurations. 
This allows you to easily integrate any REST API, GraphQL endpoint, or web service into an MCP-compatible tool that can be used by AI assistants like Claude.\n- **[Apollo IO MCP Server](https://github.com/AgentX-ai/apollo-io-mcp-server)** - apollo.io mcp server. Get/enrich contact data for people and organizations agentically.\n- **[Apple Books](https://github.com/vgnshiyer/apple-books-mcp)** - Interact with your library on Apple Books, manage your book collection, summarize highlights, notes, and much more.\n- **[Apple Calendar](https://github.com/Omar-v2/mcp-ical)** - An MCP server that allows you to interact with your macOS Calendar through natural language, including features such as event creation, modification, schedule listing, finding free time slots etc.\n- **[Apple Docs](https://github.com/kimsungwhee/apple-docs-mcp)** - A powerful Model Context Protocol (MCP) server that provides seamless access to Apple Developer Documentation through natural language queries. Search, explore, and get detailed information about Apple frameworks, APIs, sample code, and more directly in your AI-powered development environment.\n- **[Apple Script](https://github.com/peakmojo/applescript-mcp)** - MCP server that lets LLM run AppleScript code to fully control anything on Mac, no setup needed.\n- **[APT MCP](https://github.com/GdMacmillan/apt-mcp-server)** - MCP server which runs debian package manager (apt) commands for you using ai agents.\n- **[Aranet4](https://github.com/diegobit/aranet4-mcp-server)** - MCP Server to manage your Aranet4 CO2 sensor. Fetch data and store in a local SQLite. 
Ask questions about historical data.\n- **[ArangoDB](https://github.com/ravenwits/mcp-server-arangodb)** - MCP Server that provides database interaction capabilities through [ArangoDB](https://arangodb.com/).\n- **[ArangoDB Graph](https://github.com/PCfVW/mcp-arangodb-async)** - Async-first Python architecture, wrapping the official [python-arango driver](https://github.com/arangodb/python-arango) with graph management capabilities, content conversion utilities (JSON, Markdown, YAML and Table), backup/restore functionality, and graph analytics capabilities; the 33 MCP tools use strict [Pydantic](https://github.com/pydantic/pydantic) validation.\n- **[Archestra.AI](https://github.com/archestra-ai/archestra)** - Open-source enterprise-ready MCP gateway, MCP registry, MCP orchestrator, MCP credentials management, LLM cost management and chat platform.\n- **[Arduino](https://github.com/vishalmysore/choturobo)** - MCP Server that enables AI-powered robotics using Claude AI and Arduino (ESP32) for real-world automation and interaction with robots.\n- **[arXiv API](https://github.com/prashalruchiranga/arxiv-mcp-server)** - An MCP server that enables interacting with the arXiv API using natural language.\n- **[arxiv-latex-mcp](https://github.com/takashiishida/arxiv-latex-mcp)** - MCP server that fetches and processes arXiv LaTeX sources for precise interpretation of mathematical expressions in papers.\n- **[Arr Suite](https://github.com/shaktech786/arr-suite-mcp-server)** - Intelligent MCP server for Plex and the complete *arr media automation suite (Sonarr, Radarr, Prowlarr, Bazarr, Overseerr) with natural language processing for unified media management.\n- **[Atlassian](https://github.com/sooperset/mcp-atlassian)** - Interact with Atlassian Cloud products (Confluence and Jira) including searching/reading Confluence spaces/pages, accessing Jira issues, and project metadata.\n- **[Atlassian Server (by phuc-nt)](https://github.com/phuc-nt/mcp-atlassian-server)** - An MCP 
server that connects AI agents (Cline, Claude Desktop, Cursor, etc.) to Atlassian Jira & Confluence, enabling data queries and actions through the Model Context Protocol.\n- **[Attestable MCP](https://github.com/co-browser/attestable-mcp-server)** - An MCP server running inside a trusted execution environment (TEE) via Gramine, showcasing remote attestation using [RA-TLS](https://gramine.readthedocs.io/en/stable/attestation.html). This allows an MCP client to verify the server before connecting.\n- **[Audius](https://github.com/glassBead-tc/audius-mcp-atris)** - Audius + AI = Atris. Interact with fans, stream music, tip your favorite artists, and more on Audius: all through Claude.\n- **[AutoML](https://github.com/emircansoftware/MCP_Server_DataScience)** – An MCP server for data analysis workflows including reading, preprocessing, feature engineering, model selection, visualization, and hyperparameter tuning.\n- **[Aviationstack](https://github.com/Pradumnasaraf/aviationstack-mcp)** – An MCP server using the AviationStack API to fetch real-time flight data including airline flights, airport schedules, future flights and aircraft types.\n- **[AWS](https://github.com/rishikavikondala/mcp-server-aws)** - Perform operations on your AWS resources using an LLM.\n- **[AWS Athena](https://github.com/lishenxydlgzs/aws-athena-mcp)** - An MCP server for AWS Athena to run SQL queries on Glue Catalog.\n- **[AWS Cognito](https://github.com/gitCarrot/mcp-server-aws-cognito)** - An MCP server that connects to AWS Cognito for authentication and user management.\n- **[AWS Cost Explorer](https://github.com/aarora79/aws-cost-explorer-mcp-server)** - Optimize your AWS spend (including Amazon Bedrock spend) with this MCP server by examining spend across regions, services, instance types and foundation models ([demo video](https://www.youtube.com/watch?v=WuVOmYLRFmI&feature=youtu.be)).\n- **[AWS Open Data](https://github.com/domdomegg/aws-open-data-mcp)** - Search and explore datasets 
from the AWS Open Data Registry with fuzzy matching and detailed dataset information.\n- **[AWS Resources Operations](https://github.com/baryhuang/mcp-server-aws-resources-python)** - Run generated python code to securely query or modify any AWS resources supported by boto3.\n- **[AWS S3](https://github.com/aws-samples/sample-mcp-server-s3)** - A sample MCP server for AWS S3 that flexibly fetches objects from S3 such as PDF documents.\n- **[AWS SES](https://github.com/aws-samples/sample-for-amazon-ses-mcp)** - Sample MCP Server for Amazon SES (SESv2). See [AWS blog post](https://aws.amazon.com/blogs/messaging-and-targeting/use-ai-agents-and-the-model-context-protocol-with-amazon-ses/) for more details.\n- **[AX-Platform](https://github.com/AX-MCP/PaxAI?tab=readme-ov-file#mcp-setup-guides)** - AI Agent collaboration platform. Collaborate on tasks, share context, and coordinate workflows.\n- **[Azure ADX](https://github.com/pab1it0/adx-mcp-server)** - Query and analyze Azure Data Explorer databases.\n- **[Azure DevOps](https://github.com/Vortiago/mcp-azure-devops)** - An MCP server that provides a bridge to Azure DevOps services, enabling AI assistants to query and manage work items.\n- **[Azure MCP Hub](https://github.com/Azure-Samples/mcp)** - A curated list of all MCP servers and related resources for Azure developers by **[Arun Sekhar](https://github.com/achandmsft)**\n- **[Azure OpenAI DALL-E 3 MCP Server](https://github.com/jacwu/mcp-server-aoai-dalle3)** - An MCP server for Azure OpenAI DALL-E 3 service to generate image from text.\n- **[Azure Wiki Search](https://github.com/coder-linping/azure-wiki-search-server)** - An MCP that enables AI to query the wiki hosted on Azure DevOps Wiki.\n- **[Baidu AI Search](https://github.com/baidubce/app-builder/tree/master/python/mcp_server/ai_search)** - Web search with Baidu Cloud's AI Search\n- **[BambooHR MCP](https://github.com/encoreshao/bamboohr-mcp)** - An MCP server that interfaces with the BambooHR APIs, 
providing access to employee data, time tracking, and HR management features.\n- **[Base Free USDC Transfer](https://github.com/magnetai/mcp-free-usdc-transfer)** - Send USDC on [Base](https://base.org) for free using Claude AI! Built with [Coinbase CDP](https://docs.cdp.coinbase.com/mpc-wallet/docs/welcome).\n- **[Basic Memory](https://github.com/basicmachines-co/basic-memory)** - Local-first knowledge management system that builds a semantic graph from Markdown files, enabling persistent memory across conversations with LLMs.\n- **[BGG MCP](https://github.com/kkjdaniel/bgg-mcp)** (by kkjdaniel) - MCP to enable interaction with the BoardGameGeek API via AI tooling.\n- **[Bible](https://github.com/trevato/bible-mcp)** - Add biblical context to your generative AI applications.\n- **[BigQuery](https://github.com/LucasHild/mcp-server-bigquery)** (by LucasHild) - This server enables LLMs to inspect database schemas and execute queries on BigQuery.\n- **[BigQuery](https://github.com/ergut/mcp-bigquery-server)** (by ergut) - Server implementation for Google BigQuery integration that enables direct BigQuery database access and querying capabilities\n- **[Bilibili](https://github.com/wangshunnn/bilibili-mcp-server)** - This MCP server provides tools to fetch Bilibili user profiles, video metadata, search videos, and more.\n- **[Binance](https://github.com/ethancod1ng/binance-mcp-server)** - Cryptocurrency trading and market data access through Binance API integration.\n- **[Binance](https://github.com/AnalyticAce/binance-mcp-server)** (by dosseh shalom) - Unofficial tools and server implementation for Binance's Model Context Protocol (MCP). 
Designed to support developers building crypto trading AI Agents.\n- **[Bing Web Search API](https://github.com/leehanchung/bing-search-mcp)** (by hanchunglee) - Server implementation for Microsoft Bing Web Search API.\n- **[BioMCP](https://github.com/genomoncology/biomcp)** (by imaurer) - Biomedical research assistant server providing access to PubMed, ClinicalTrials.gov, and MyVariant.info.\n- **[bioRxiv](https://github.com/JackKuo666/bioRxiv-MCP-Server)** - 🔍 Enable AI assistants to search and access bioRxiv papers through a simple MCP interface.\n- **[Bitable MCP](https://github.com/lloydzhou/bitable-mcp)** (by lloydzhou) - MCP server provides access to Lark Bitable through the Model Context Protocol. It allows users to interact with Bitable tables using predefined tools.\n- **[Blender](https://github.com/ahujasid/blender-mcp)** (by ahujasid) - Blender integration allowing prompt enabled 3D scene creation, modeling and manipulation.\n- **[Blender MCP](https://github.com/pranav-deshmukh/blender-mcp)** - MCP server to create professional-like 3D scenes in Blender using natural language.\n- **[Blockbench MCP Plugin](https://github.com/jasonjgardner/blockbench-mcp-plugin)** (by jasonjgardner) - Blockbench plugin to connect AI agents to Blockbench's JavaScript API. Allows for creating and editing 3D models or pixel art textures with AI in Blockbench.\n- **[Blockchain MCP](https://github.com/tatumio/blockchain-mcp)** - MCP Server for Blockchain Data from **[Tatum](http://tatum.io/mcp)** that instantly unlocks blockchain access for your AI agents. This official Tatum MCP server connects to any LLM in seconds.\n- **[Bluesky](https://github.com/semioz/bluesky-mcp)** (by semioz) - An MCP server for Bluesky, a decentralized social network. 
It enables automated interactions with the AT Protocol, supporting features like posting, liking, reposting, timeline management, and profile operations.\n- **[Bluetooth MCP Server](https://github.com/Hypijump31/bluetooth-mcp-server)** - Control Bluetooth devices and manage connections through natural language commands, including device discovery, pairing, and audio controls.\n- **[BNBChain MCP](https://github.com/bnb-chain/bnbchain-mcp)** - An MCP server for interacting with BSC, opBNB, and the Greenfield blockchain.\n- **[Braintree](https://github.com/QuentinCody/braintree-mcp-server)** - Unofficial PayPal Braintree payment gateway MCP Server for AI agents to process payments, manage customers, and handle transactions securely.\n- **[Brazilian Law](https://github.com/pdmtt/brlaw_mcp_server/)** (by pdmtt) - Agent-driven research on Brazilian law using official sources.\n- **[BreakoutRoom](https://github.com/agree-able/room-mcp)** - Agents accomplishing goals together in p2p rooms\n- **[Browser MCP](https://github.com/bytedance/UI-TARS-desktop/tree/main/packages/agent-infra/mcp-servers/browser)** (by UI-TARS) - A fast, lightweight MCP server that empowers LLMs with browser automation via Puppeteer’s structured accessibility data, featuring optional vision mode for complex visual understanding and flexible, cross-platform configuration.\n- **[browser-use](https://github.com/co-browser/browser-use-mcp-server)** (by co-browser) - browser-use MCP server with dockerized playwright + chromium + vnc. supports stdio & resumable http.\n- **[BrowserLoop](https://github.com/mattiasw/browserloop)** - An MCP server for taking screenshots of web pages using Playwright. 
Supports high-quality capture with configurable formats, viewport sizes, cookie-based authentication, and both full page and element-specific screenshots.\n- **[Bsc-mcp](https://github.com/TermiX-official/bsc-mcp)** - The first MCP server that serves as the bridge between AI and BNB Chain, enabling AI agents to execute complex on-chain operations through seamless integration with the BNB Chain, including transfer, swap, launch, security check on any token and even more.\n- **[BugBug MCP Server](https://github.com/simplypixi/bugbug-mcp-server)** - Unofficial MCP server for BugBug API.\n- **[BVG MCP Server (Unofficial)](https://github.com/svkaizoku/mcp-bvg)** - Unofficial MCP server for Berliner Verkehrsbetriebe API.\n- **[Bybit](https://github.com/ethancod1ng/bybit-mcp-server)** - A Model Context Protocol (MCP) server for integrating AI assistants with Bybit cryptocurrency exchange APIs, enabling automated trading, market data access, and account management.\n- **[C64 Bridge](https://github.com/chrisgleissner/c64bridge)** - AI command bridge for Commodore 64 hardware. Control Ultimate 64 and C64 Ultimate devices through REST API with BASIC and assembly program creation, real-time memory inspection, SID audio synthesis, and curated retro computing knowledge via local RAG.\n- **[CAD-MCP](https://github.com/daobataotie/CAD-MCP#)** (by daobataotie) - Drawing CAD(Line,Circle,Text,Annotation...) 
through MCP server, supporting mainstream CAD software.\n- **[Calculator](https://github.com/githejie/mcp-server-calculator)** - This server enables LLMs to use calculator for precise numerical calculations.\n- **[CalDAV MCP](https://github.com/dominik1001/caldav-mcp)** - A CalDAV MCP server to expose calendar operations as tools for AI assistants.\n- **[Calendly-mcp-server](https://github.com/meAmitPatil/calendly-mcp-server)** - Open source calendly mcp server.\n- **[Catalysis Hub](https://github.com/QuentinCody/catalysishub-mcp-server)** - Unofficial MCP server for searching and retrieving scientific data from the Catalysis Hub database, providing access to computational catalysis research and surface reaction data.\n- **[CCTV VMS MCP](https://github.com/jyjune/mcp_vms)** - A Model Context Protocol (MCP) server designed to connect to a CCTV recording program (VMS) to retrieve recorded and live video streams. It also provides tools to control the VMS software, such as showing live or playback dialogs for specific channels at specified times.\n- **[CFBD API](https://github.com/lenwood/cfbd-mcp-server)** - An MCP server for the [College Football Data API](https://collegefootballdata.com/).\n- **[ChatMCP](https://github.com/AI-QL/chat-mcp)** – An Open Source Cross-platform GUI Desktop application compatible with Linux, macOS, and Windows, enabling seamless interaction with MCP servers across dynamically selectable LLMs, by **[AIQL](https://github.com/AI-QL)**\n- **[ChatSum](https://github.com/mcpso/mcp-server-chatsum)** - Query and Summarize chat messages with LLM. 
by [mcpso](https://mcp.so)\n- **[Chess.com](https://github.com/pab1it0/chess-mcp)** - Access Chess.com player data, game records, and other public information through standardized MCP interfaces, allowing AI assistants to search and analyze chess information.\n- **[Chessagine-mcp](https://github.com/jalpp/chessagine-mcp)** - A chess MCP server that integrates Stockfish engine evaluation, positional theme analysis, Lichess opening databases, and chess knowledgebase.\n- **[ChessPal Chess Engine (stockfish)](https://github.com/wilson-urdaneta/chesspal-mcp-engine)** - A Stockfish-powered chess engine exposed as an MCP server. Calculates best moves and supports both HTTP/SSE and stdio transports.\n- **[Chroma](https://github.com/privetin/chroma)** - Vector database server for semantic document search and metadata filtering, built on Chroma\n- **[Chrome history](https://github.com/vincent-pli/chrome-history-mcp)** - Talk with AI about your browser history, get fun ^_^\n- **[cicada](https://github.com/wende/cicada)** - AST-powered code intelligence for Elixir projects. Provides 9 tools including function search, call site tracking, PR attribution, git history, and semantic search - reducing AI query tokens by 82%.\n- **[CIViC](https://github.com/QuentinCody/civic-mcp-server)** - MCP server for the Clinical Interpretation of Variants in Cancer (CIViC) database, providing access to clinical variant interpretations and genomic evidence for cancer research.\n- **[Claude Thread Continuity](https://github.com/peless/claude-thread-continuity)** - Persistent memory system enabling Claude Desktop conversations to resume with full context across sessions. Maintains conversation history, project states, and user preferences for seamless multi-session workflows.\n- **[claude-faf-mcp](https://github.com/Wolfe-Jam/claude-faf-mcp)** - MCP server for .faf format. 
Context scoring engine with project context management.\n- **[ClaudePost](https://github.com/ZilongXue/claude-post)** - ClaudePost enables seamless email management for Gmail, offering secure features like email search, reading, and sending.\n- **[CLDGeminiPDF Analyzer](https://github.com/tfll37/CLDGeminiPDF-Analyzer)** - MCP server tool enabling sharing large PDF files to Google LLMs via API for further/additional analysis and response retrieval to Claude Desktop.\n- **[ClearML MCP](https://github.com/prassanna-ravishankar/clearml-mcp)** - Get comprehensive ML experiment context and analysis directly from [ClearML](https://clear.ml) in your AI conversations.\n- **[ClickUp](https://github.com/TaazKareem/clickup-mcp-server)** - MCP server for ClickUp task management, supporting task creation, updates, bulk operations, and markdown descriptions.\n- **[Cloudinary](https://github.com/felores/cloudinary-mcp-server)** - Cloudinary Model Context Protocol Server to upload media to Cloudinary and get back the media link and details.\n- **[CockroachDB](https://github.com/amineelkouhen/mcp-cockroachdb)** - MCP server enabling AI agents and LLMs to manage, monitor, and query **[CockroachDB](https://www.cockroachlabs.com/)** using natural language.\n- **[CockroachDB MCP Server](https://github.com/viragtripathi/cockroachdb-mcp-server)** – Full-featured MCP implementation built with FastAPI and CockroachDB. Supports schema bootstrapping, JSONB storage, LLM-ready CLI, and optional `/debug` endpoints.\n- **[Code Screenshot Generator](https://github.com/MoussaabBadla/code-screenshot-mcp)** - Generate beautiful syntax-highlighted code screenshots with professional themes directly from Claude. Supports file reading, line selection, git diff visualization, and batch processing.\n- **[code-assistant](https://github.com/stippi/code-assistant)** - A coding assistant MCP server that allows to explore a code-base and make changes to code. 
Should be used with trusted repos only (insufficient protection against prompt injections).\n- **[code-context-provider-mcp](https://github.com/AB498/code-context-provider-mcp)** - MCP server that provides code context and analysis for AI assistants. Extracts directory structure and code symbols using WebAssembly Tree-sitter parsers without Native Dependencies.\n- **[code-executor](https://github.com/bazinga012/mcp_code_executor)** - An MCP server that allows LLMs to execute Python code within a specified Conda environment.\n- **[code-sandbox-mcp](https://github.com/Automata-Labs-team/code-sandbox-mcp)** - An MCP server to create secure code sandbox environment for executing code within Docker containers.\n- **[cognee-mcp](https://github.com/topoteretes/cognee/tree/main/cognee-mcp)** - GraphRAG memory server with customizable ingestion, data processing and search\n- **[coin_api_mcp](https://github.com/longmans/coin_api_mcp)** - Provides access to [coinmarketcap](https://coinmarketcap.com/) cryptocurrency data.\n- **[CoinMarketCap](https://github.com/shinzo-labs/coinmarketcap-mcp)** - Implements the complete [CoinMarketCap](https://coinmarketcap.com/) API for accessing cryptocurrency market data, exchange information, and other blockchain-related metrics.\n- **[commands](https://github.com/g0t4/mcp-server-commands)** - Run commands and scripts. Just like in a terminal.\n- **[Companies House MCP](https://github.com/stefanoamorelli/companies-house-mcp)** (by Stefano Amorelli) - MCP server to connect with the UK Companies House API.\n- **[computer-control-mcp](https://github.com/AB498/computer-control-mcp)** - MCP server that provides computer control capabilities, like mouse, keyboard, OCR, etc. 
using PyAutoGUI, RapidOCR, ONNXRuntime Without External Dependencies.\n- **[Computer-Use - Remote MacOS Use](https://github.com/baryhuang/mcp-remote-macos-use)** - Open-source out-of-the-box alternative to OpenAI Operator, providing a full desktop experience and optimized for using remote macOS machines as autonomous AI agents.\n- **[computer-use-mcp](https://github.com/domdomegg/computer-use-mcp)** - Control your computer with screen capture, mouse, and keyboard capabilities for automated desktop interaction and task execution.\n- **[Congress.gov API](https://github.com/AshwinSundar/congress_gov_mcp)** - An MCP server to interact with real-time data from the Congress.gov API, which is the official API for the United States Congress.\n- **[Console Automation](https://github.com/ooples/mcp-console-automation)** - Production-ready MCP server for AI-driven console automation and monitoring. 40 tools for session management, SSH, testing, monitoring, and background jobs. Like Playwright for terminal applications.\n- **[consul-mcp](https://github.com/kocierik/consul-mcp-server)** - A consul MCP server for service management, health check and Key-Value Store\n- **[consult7](https://github.com/szeider/consult7)** - Analyze large codebases and document collections using high-context models via OpenRouter, OpenAI, or Google AI -- very useful, e.g., with Claude Code\n- **[Contentful-mcp](https://github.com/ivo-toby/contentful-mcp)** - Read, update, delete, publish content in your [Contentful](https://contentful.com) space(s) from this MCP Server.\n- **[Context Crystallizer](https://github.com/hubertciebiada/context-crystallizer)** - AI Context Engineering tool that transforms large repositories into crystallized, AI-consumable knowledge through systematic analysis and optimization.\n- **[Context Processor](https://github.com/mschultheiss83/context-processor)** - Intelligent context management with configurable pre-processing strategies (clarify, analyze, search, fetch) for 
enhancing content clarity, searchability, and metadata extraction.\n- **[context-portal](https://github.com/GreatScottyMac/context-portal)** - Context Portal (ConPort) is a memory bank database system that effectively builds a project-specific knowledge graph, capturing entities like decisions, progress, and architecture, along with their relationships. This serves as a powerful backend for Retrieval Augmented Generation (RAG), enabling AI assistants to access precise, up-to-date project information.\n- **[cplusplus-mcp](https://github.com/kandrwmrtn/cplusplus_mcp)** - Semantic C++ code analysis using libclang. Enables Claude to understand C++ codebases through AST parsing rather than text search - find classes, navigate inheritance, trace function calls, and explore code relationships.\n- **[CRASH](https://github.com/nikkoxgonzales/crash-mcp)** - MCP server for structured, iterative reasoning and thinking with flexible validation, confidence tracking, revision mechanisms, and branching support.\n- **[CreateveAI Nexus](https://github.com/spgoodman/createveai-nexus-server)** - Open-Source Bridge Between AI Agents and Enterprise Systems, with simple custom API plug-in capabilities (including close compatibility with ComfyUI nodes), support for Copilot Studio's MCP agent integrations, and support for Azure deployment in secure environments with secrets stored in Azure Key Vault, as well as straightforward on-premises deployment.\n- **[Creatify](https://github.com/TSavo/creatify-mcp)** - MCP Server that exposes Creatify AI API capabilities for AI video generation, including avatar videos, URL-to-video conversion, text-to-speech, and AI-powered editing tools.\n- **[Cronlytic](https://github.com/Cronlytic/cronlytic-mcp-server)** - Create CRUD operations for serverless cron jobs through [Cronlytic](https://cronlytic.com) MCP Server\n- **[crypto-feargreed-mcp](https://github.com/kukapay/crypto-feargreed-mcp)**  -  Providing real-time and historical Crypto Fear & Greed Index 
data.\n- **[crypto-indicators-mcp](https://github.com/kukapay/crypto-indicators-mcp)**  -  An MCP server providing a range of cryptocurrency technical analysis indicators and strategies.\n- **[crypto-sentiment-mcp](https://github.com/kukapay/crypto-sentiment-mcp)**  -  An MCP server that delivers cryptocurrency sentiment analysis to AI agents.\n- **[cryptopanic-mcp-server](https://github.com/kukapay/cryptopanic-mcp-server)** - Providing latest cryptocurrency news to AI agents, powered by CryptoPanic.\n- **[CSV Editor](https://github.com/santoshray02/csv-editor)** - Comprehensive CSV processing with 40+ operations for data manipulation, analysis, and validation. Features auto-save, undo/redo, and handles GB+ files. Built with FastMCP & Pandas.\n- **[Current Time UTC MCP Server](https://github.com/jairampatel/currenttimeutc-mcp)** - A lightweight MCP server that provides accurate UTC time and timezone conversions in real-time.\n- **[Cursor MCP Installer](https://github.com/matthewdcage/cursor-mcp-installer)** - A tool to easily install and configure other MCP servers within Cursor IDE, with support for npm packages, local directories, and Git repositories.\n- **[CV Forge](https://github.com/thechandanbhagat/cv-forge)** - An intelligent MCP (Model Context Protocol) server that analyzes job postings and crafts perfectly-matched CVs (by [Chandan Bhagat](https://me.chandanbhagat.com.np)).\n- **[CVE Intelligence Server](https://github.com/gnlds/mcp-cve-intelligence-server-lite)** – Provides vulnerability intelligence via multi-source CVE data, essential exploit discovery, and EPSS risk scoring through the MCP. 
Useful for security research, automation, and agent workflows.\n- **[D365FO](https://github.com/mafzaal/d365fo-client)** - A comprehensive MCP server for Microsoft Dynamics 365 Finance & Operations (D365 F&O) that provides easy access to OData endpoints, metadata operations, label management, and AI assistant integration.\n- **[Dagster](https://github.com/dagster-io/dagster/tree/master/python_modules/libraries/dagster-dg-cli)** - An MCP server to easily build data pipelines using [Dagster](https://dagster.io/).\n- **[Dappier](https://github.com/DappierAI/dappier-mcp)** - Connect LLMs to real-time, rights-cleared, proprietary data from trusted sources. Access specialized models for Real-Time Web Search, News, Sports, Financial Data, Crypto, and premium publisher content. Explore data models at [marketplace.dappier.com](https://marketplace.dappier.com/marketplace).\n- **[Data Exploration](https://github.com/reading-plus-ai/mcp-server-data-exploration)** - MCP server for autonomous data exploration on .csv-based datasets, providing intelligent insights with minimal effort. 
NOTE: Will execute arbitrary Python code on your machine, please use with caution!\n- **[Data4library](https://github.com/isnow890/data4library-mcp)** (by isnow890) - MCP server for Korea's Library Information Naru API, providing comprehensive access to public library data, book searches, loan status, reading statistics, and GPS-based nearby library discovery across South Korea.\n- **[Databricks](https://github.com/JordiNeil/mcp-databricks-server)** - Allows LLMs to run SQL queries, list and get details of jobs executions in a Databricks account.\n- **[Databricks Genie](https://github.com/yashshingvi/databricks-genie-MCP)** - A server that connects to the Databricks Genie, allowing LLMs to ask natural language questions, run SQL queries, and interact with Databricks conversational agents.\n- **[Databricks Smart SQL](https://github.com/RafaelCartenet/mcp-databricks-server)** - Leveraging Databricks Unity Catalog metadata, perform smart efficient SQL queries to solve Ad-hoc queries and explore data.\n- **[DataCite](https://github.com/QuentinCody/datacite-mcp-server)** - Unofficial MCP server for DataCite, providing access to research data and publication metadata through DataCite's REST API and GraphQL interface for scholarly research discovery.\n- **[Datadog](https://github.com/GeLi2001/datadog-mcp-server)** - Datadog MCP Server for application tracing, monitoring, dashboard, incidents queries built on official datadog api.\n- **[Dataset Viewer](https://github.com/privetin/dataset-viewer)** - Browse and analyze Hugging Face datasets with features like search, filtering, statistics, and data export\n- **[Dataverse DevTools MCP Server](https://github.com/vignaesh01/DataverseDevToolsMcpServer)** - An MCP server exposing ready-to-use Dataverse/Dynamics 365 tools for user and security administration, data operations, Web API executions, metadata exploration, and troubleshooting.\n- **[DataWorks](https://github.com/aliyun/alibabacloud-dataworks-mcp-server)** - A Model 
Context Protocol (MCP) server that provides tools for AI, allowing it to interact with the [DataWorks](https://www.alibabacloud.com/help/en/dataworks/) Open API through a standardized interface. This implementation is based on the Alibaba Cloud Open API and enables AI agents to perform cloud resources operations seamlessly.\n- **[DaVinci Resolve](https://github.com/samuelgursky/davinci-resolve-mcp)** - MCP server integration for DaVinci Resolve providing powerful tools for video editing, color grading, media management, and project control.\n- **[DBHub](https://github.com/bytebase/dbhub/)** - Universal database MCP server connecting to MySQL, MariaDB, PostgreSQL, and SQL Server.\n- **[Deebo](https://github.com/snagasuri/deebo-prototype)** – Agentic debugging MCP server that helps AI coding agents delegate and fix hard bugs through isolated multi-agent hypothesis testing.\n- **[Deep Research](https://github.com/reading-plus-ai/mcp-server-deep-research)** - Lightweight MCP server offering Grok/OpenAI/Gemini/Perplexity-style automated deep research exploration and structured reporting.\n- **[DeepSeek MCP Server](https://github.com/DMontgomery40/deepseek-mcp-server)** - Model Context Protocol server integrating DeepSeek's advanced language models, in addition to [other useful API endpoints](https://github.com/DMontgomery40/deepseek-mcp-server?tab=readme-ov-file#features)\n- **[deepseek-thinker-mcp](https://github.com/ruixingshi/deepseek-thinker-mcp)** - An MCP (Model Context Protocol) server that provides DeepSeek reasoning content to MCP-enabled AI clients, like Claude Desktop. 
Supports access to Deepseek's thought processes from the Deepseek API service or from a local Ollama server.\n- **[Deepseek_R1](https://github.com/66julienmartin/MCP-server-Deepseek_R1)** - A Model Context Protocol (MCP) server implementation connecting Claude Desktop with DeepSeek's language models (R1/V3)\n- **[DeFi Rates](https://github.com/qingfeng/defi-rates-mcp)** - Query real-time DeFi lending rates across 13+ protocols (Aave, Morpho, Compound, Venus, Solend, Drift, Jupiter, etc.). Compare rates, search best opportunities, and calculate looping strategies across Ethereum, Arbitrum, Base, BSC, Solana, and HyperEVM.\n- **[Defuddle Fetch](https://github.com/domdomegg/defuddle-fetch-mcp-server)** - Fetch web content with enhanced extraction using Defuddle, converting pages to clean markdown with better results than standard HTML-to-markdown converters.\n- **[deploy-mcp](https://github.com/alexpota/deploy-mcp)** - Universal deployment tracker for AI assistants with live status badges and deployment monitoring.\n- **[Depyler](https://github.com/paiml/depyler/blob/main/docs/mcp-integration.md)** - Energy-efficient Python-to-Rust transpiler with progressive verification, enabling AI assistants to convert Python code to safe, performant Rust while reducing energy consumption by 75-85%.\n- **[Descope](https://github.com/descope-sample-apps/descope-mcp-server)** - An MCP server to integrate with [Descope](https://descope.com) to search audit logs, manage users, and more.\n- **[DesktopCommander](https://github.com/wonderwhy-er/DesktopCommanderMCP)** - Let AI edit and manage files on your computer, run terminal commands, and connect to remote servers via SSH - all powered by one of the most popular local MCP servers.\n- **[Devcontainer](https://github.com/AI-QL/mcp-devcontainers)** - An MCP server for devcontainer to generate and configure development containers directly from devcontainer configuration files.\n- 
**[DevDb](https://github.com/damms005/devdb-vscode?tab=readme-ov-file#mcp-configuration)** - An MCP server that runs right inside the IDE, for connecting to MySQL, Postgres, SQLite, and MSSQL databases.\n- **[DevOps AI Toolkit](https://github.com/vfarcic/dot-ai)** - AI-powered development productivity platform that enhances software development workflows through intelligent automation and AI-driven assistance.\n- **[DevOps-MCP](https://github.com/wangkanai/devops-mcp)** - Dynamic Azure DevOps MCP server with directory-based authentication switching, supporting work items, repositories, builds, pipelines, and multi-project management with local configuration files.\n- **[DGIdb](https://github.com/QuentinCody/dgidb-mcp-server)** - MCP server for the Drug Gene Interaction Database (DGIdb), providing access to drug-gene interaction data, druggable genome information, and pharmacogenomics research.\n- **[Dicom](https://github.com/ChristianHinge/dicom-mcp)** - An MCP server to query and retrieve medical images and for parsing and reading dicom-encapsulated documents (pdf etc.).\n- **[Dify](https://github.com/YanxingLiu/dify-mcp-server)** - A simple implementation of an MCP server for dify workflows.\n- **[Discogs](https://github.com/cswkim/discogs-mcp-server)** - An MCP server that connects to the Discogs API for interacting with your music collection.\n- **[Discord](https://github.com/v-3/discordmcp)** - An MCP server to connect to Discord guilds through a bot and read and write messages in channels\n- **[Discord](https://github.com/SaseQ/discord-mcp)** - An MCP server, which connects to Discord through a bot, and provides comprehensive integration with Discord.\n- **[Discord](https://github.com/Klavis-AI/klavis/tree/main/mcp_servers/discord)** - For Discord API integration by Klavis AI\n- **[Discourse](https://github.com/AshDevFr/discourse-mcp-server)** - An MCP server to search Discourse posts on a Discourse forum.\n- **[Dispatch 
Agent](https://github.com/abhinav-mangla/dispatch-agent)** - An intelligent MCP server that provides specialized filesystem operations through ReAct sub-agents.\n- **[DocBase](https://help.docbase.io/posts/3925317)** - Official MCP server for DocBase API integration, enabling post management, user collaboration, group administration, and more.\n- **[Docker](https://github.com/ckreiling/mcp-server-docker)** - Integrate with Docker to manage containers, images, volumes, and networks.\n- **[Docker](https://github.com/0xshariq/docker-mcp-server)** - Docker MCP Server provides advanced, unified Docker management via CLI and MCP workflows, supporting containers, images, volumes, networks, and orchestration.\n- **[Docs](https://github.com/da1z/docsmcp)** - Enable documentation access for the AI agent, supporting llms.txt and other remote or local files.\n- **[documcp](https://github.com/tosin2013/documcp)** - An MCP server for intelligent document processing and management, supporting multiple formats and document operations.\n- **[Docy](https://github.com/oborchers/mcp-server-docy)** - Docy gives your AI direct access to the technical documentation it needs, right when it needs it. 
No more outdated information, broken links, or rate limits - just accurate, real-time documentation access for more precise coding assistance.\n- **[Dodo Payments](https://github.com/dodopayments/dodopayments-node/tree/main/packages/mcp-server)** - Enables AI agents to securely perform payment operations via a lightweight, serverless-compatible interface to the [Dodo Payments](https://dodopayments.com) API.\n- **[Domain Tools](https://github.com/deshabhishek007/domain-tools-mcp-server)** - A Model Context Protocol (MCP) server for comprehensive domain analysis: WHOIS, DNS records, and DNS health checks.\n- **[Downdetector](https://github.com/domdomegg/downdetector-mcp)** - Check service status and outage information from Downdetector for real-time monitoring of service availability across various platforms and regions.\n- **[DPLP](https://github.com/szeider/mcp-dblp)**  - Searches the [DBLP](https://dblp.org) computer science bibliography database.\n- **[Druid MCP Server](https://github.com/iunera/druid-mcp-server)** - STDIO/SSE MCP Server for Apache Druid by [iunera](https://www.iunera.com) that provides extensive tools, resources, and prompts for managing and analyzing Druid clusters.\n- **[Drupal](https://github.com/Omedia/mcp-server-drupal)** - Server for interacting with [Drupal](https://www.drupal.org/project/mcp) using STDIO transport layer.\n- **[dune-analytics-mcp](https://github.com/kukapay/dune-analytics-mcp)** -  An MCP server that bridges Dune Analytics data to AI agents.\n- **[DynamoDB-Toolbox](https://www.dynamodbtoolbox.com/docs/databases/actions/mcp-toolkit)** - Leverages your Schemas and Access Patterns to interact with your [DynamoDB](https://aws.amazon.com/dynamodb) Database using natural language.\n- **[eBook-mcp](https://github.com/onebirdrocks/ebook-mcp)** - A lightweight MCP server that allows LLMs to read and interact with your personal PDF and EPUB ebooks. 
Ideal for building AI reading assistants or chat-based ebook interfaces.\n- **[ECharts MCP Server](https://github.com/hustcc/mcp-echarts)** - Generate visual charts using ECharts with AI MCP dynamically, used for chart generation and data analysis.\n- **[EDA MCP Server](https://github.com/NellyW8/mcp-EDA)** - A comprehensive Model Context Protocol server for Electronic Design Automation tools, enabling AI assistants to synthesize Verilog with Yosys, simulate designs with Icarus Verilog, run complete ASIC flows with OpenLane, and view results with GTKWave and KLayout.\n- **[EdgeOne Pages MCP](https://github.com/TencentEdgeOne/edgeone-pages-mcp)** - An MCP service for deploying HTML content to EdgeOne Pages and obtaining a publicly accessible URL.\n- **[Edwin](https://github.com/edwin-finance/edwin/tree/main/examples/mcp-server)** - MCP server for edwin SDK - enabling AI agents to interact with DeFi protocols across EVM, Solana and other blockchains.\n- **[eechat](https://github.com/Lucassssss/eechat)** - An open-source, cross-platform desktop application that seamlessly connects with MCP servers, across Linux, macOS, and Windows.\n- **[Elasticsearch](https://github.com/cr7258/elasticsearch-mcp-server)** - MCP server implementation that provides Elasticsearch interaction.\n- **[ElevenLabs](https://github.com/mamertofabian/elevenlabs-mcp-server)** - A server that integrates with ElevenLabs text-to-speech API capable of generating full voiceovers with multiple voices.\n- **[Email](https://github.com/Shy2593666979/mcp-server-email)** - This server enables users to send emails through various email providers, including Gmail, Outlook, Yahoo, Sina, Sohu, 126, 163, and QQ Mail. 
It also supports attaching files from specified directories, making it easy to upload attachments along with the email content.\n- **[Email SMTP](https://github.com/egyptianego17/email-mcp-server)** - A simple MCP server that lets your AI agent send emails and attach files through SMTP.\n- **[Enhance Prompt](https://github.com/FelixFoster/mcp-enhance-prompt)** - An MCP service to enhance your prompt.\n- **[Entrez](https://github.com/QuentinCody/entrez-mcp-server)** - Unofficial MCP server for NCBI Entrez databases, providing access to PubMed articles, gene information, protein data, and other biomedical research resources through NCBI's E-utilities API.\n- **[Ergo Blockchain MCP](https://github.com/marctheshark3/ergo-mcp)** - An MCP server to integrate Ergo Blockchain Node and Explorer APIs for checking address balances, analyzing transactions, viewing transaction history, performing forensic analysis of addresses, searching for tokens, and monitoring network status.\n- **[ESP MCP Server](https://github.com/horw/esp-mcp)** - An MCP server that integrates ESP IDF commands like building and flashing code for ESP Microcontrollers using an LLM.\n- **[Eunomia](https://github.com/whataboutyou-ai/eunomia-MCP-server)** - Extension of the Eunomia framework that connects Eunomia instruments with MCP servers\n- **[Everything Search](https://github.com/mamertofabian/mcp-everything-search)** - Fast file searching capabilities across Windows (using [Everything SDK](https://www.voidtools.com/support/everything/sdk/)), macOS (using mdfind command), and Linux (using locate/plocate command).\n- **[EVM MCP Server](https://github.com/mcpdotdirect/evm-mcp-server)** - Comprehensive blockchain services for 30+ EVM networks, supporting native tokens, ERC20, NFTs, smart contracts, transactions, and ENS resolution.\n- **[Excel](https://github.com/haris-musa/excel-mcp-server)** - Excel manipulation including data reading/writing, worksheet management, formatting, charts, and pivot table.\n- 
**[Excel to JSON MCP by WTSolutions](https://github.com/he-yang/excel-to-json-mcp)** - MCP Server providing a standardized interface for converting (1) Excel or CSV data into JSON format ;(2) Excel(.xlsx) file into Structured JSON.\n- **[Extended Memory](https://github.com/ssmirnovpro/extended-memory-mcp)** - Persistent memory across Claude conversations with multi-project support, automatic importance scoring, and tag-based organization. Production-ready with 400+ tests.\n- **[F1](https://github.com/AbhiJ2706/f1-mcp/tree/main)** - Access to Formula 1 data including race results, driver information, lap times, telemetry, and circuit details.\n- **[Fabi](https://docs.fabi.ai/advanced_features_and_dev_tools/mcp_server)** - MCP server that exposes [Fabi](https://app.fabi.ai/) analyst agent to turn natural-language prompts into insights: navigating connected data, generating safe SQL/Python, running queries, and saving results into dashboards.\n- **[Fabric MCP](https://github.com/aci-labs/ms-fabric-mcp)** - Microsoft Fabric MCP server to accelerate working in your Fabric Tenant with the help of your favorite LLM models.\n- **[Fabric Real-Time Intelligence MCP](https://github.com/Microsoft/fabric-rti-mcp)** - Official Microsoft Fabric RTI server to accelerate working with Eventhouse, Azure Data Explorer(Kusto), Eventstreams and other RTI items using your favorite LLM models.\n- **[fabric-mcp-server](https://github.com/adapoet/fabric-mcp-server)** - The fabric-mcp-server is an MCP server that integrates [Fabric](https://github.com/danielmiessler/fabric) patterns with [Cline](https://cline.bot/), exposing them as tools for AI-driven task execution and enhancing Cline's capabilities.\n- **[Facebook Ads](https://github.com/gomarble-ai/facebook-ads-mcp-server)** - MCP server acting as an interface to the Facebook Ads, enabling programmatic access to Facebook Ads data and management features.\n- **[Facebook Ads 10xeR](https://github.com/fortytwode/10xer)** - Advanced Facebook 
Ads MCP server with enhanced creative insights, multi-dimensional breakdowns, and comprehensive ad performance analytics.\n- **[Facebook Ads Library](https://github.com/trypeggy/facebook-ads-library-mcp)** - Get any answer from the Facebook Ads Library, conduct deep research including messaging, creative testing and comparisons in seconds.\n- **[Fal MCP Server](https://github.com/raveenb/fal-mcp-server)** - Generate AI images, videos, and music using Fal.ai models (FLUX, Stable Diffusion, MusicGen) directly in Claude\n- **[Fantasy PL](https://github.com/rishijatia/fantasy-pl-mcp)** - Give your coding agent direct access to up-to-date Fantasy Premier League data\n- **[Fast Filesystem](https://github.com/efforthye/fast-filesystem-mcp)** - Advanced filesystem operations with large file handling capabilities and Claude-optimized features. Provides fast file reading/writing, sequential reading for large files, directory operations, file search, and streaming writes with backup & recovery.\n- **[Fastmail MCP](https://github.com/MadLlama25/fastmail-mcp)** - Access Fastmail via JMAP: list/search emails, send and move mail, handle attachments/threads, plus contacts and calendar tools.\n- **[fastn.ai – Unified API MCP Server](https://github.com/fastnai/mcp-fastn)** - A remote, dynamic MCP server with a unified API that connects to 1,000+ tools, actions, and workflows, featuring built-in authentication and monitoring.\n- **[FDIC BankFind MCP Server - (Unofficial)](https://github.com/clafollett/fdic-bank-find-mcp-server)** - This is an MCP server that brings the power of FDIC BankFind APIs straight to your AI tools and workflows. Structured U.S. banking data, delivered with maximum vibes. 
😎📊\n- **[Federal Reserve Economic Data (FRED)](https://github.com/stefanoamorelli/fred-mcp-server)** (by Stefano Amorelli) - Community developed MCP server to interact with the Federal Reserve Economic Data.\n- **[Fetch](https://github.com/zcaceres/fetch-mcp)** - A server that flexibly fetches HTML, JSON, Markdown, or plaintext.\n- **[Feyod](https://github.com/jeroenvdmeer/feyod-mcp)** - A server that answers questions about football matches, and specialised in the football club Feyenoord.\n- **[FHIR](https://github.com/wso2/fhir-mcp-server)** - A Model Context Protocol server that provides seamless, standardized access to Fast Healthcare Interoperability Resources (FHIR) data from any compatible FHIR server. Designed for easy integration with AI tools, developer workflows, and healthcare applications, it enables natural language and programmatic search, retrieval, and analysis of clinical data.\n- **[Fibaro HC3](https://github.com/coding-sailor/mcp-server-hc3)** - MCP server for Fibaro Home Center 3 smart home systems.\n- **[Figma](https://github.com/GLips/Figma-Context-MCP)** - Give your coding agent direct access to Figma file data, helping it one-shot design implementation.\n- **[Figma](https://github.com/paulvandermeijs/figma-mcp)** - A blazingly fast MCP server to read and export your Figma design files.\n- **[Figma to Flutter](https://github.com/mhmzdev/figma-flutter-mcp)** - Write down clean and better Flutter code from Figma design tokens and enrich nodes data in Flutter terminology.\n- **[Files](https://github.com/flesler/mcp-files)** - Enables agents to quickly find and edit code in a codebase with surgical precision. 
Find symbols, edit them everywhere.\n- **[FileSystem Server](https://github.com/Oncorporation/filesystem_server)** - Local MCP server for Visual Studio 2022 that provides code-workspace functionality by giving AI agents selective access to project folders and files\n- **[finmap.org](https://github.com/finmap-org/mcp-server)** MCP server provides comprehensive historical data from the US, UK, Russian and Turkish stock exchanges. Access sectors, tickers, company profiles, market cap, volume, value, and trade counts, as well as treemap and histogram visualizations.\n- **[Firebase](https://github.com/gannonh/firebase-mcp)** - Server to interact with Firebase services including Firebase Authentication, Firestore, and Firebase Storage.\n- **[Fish Audio](https://github.com/da-okazaki/mcp-fish-audio-server)** - Text-to-Speech integration with Fish Audio's API, supporting multiple voices, streaming, and real-time playback\n- **[FitBit MCP Server](https://github.com/NitayRabi/fitbit-mcp)** - An MCP server that connects to FitBit API using a token obtained from OAuth flow.\n- **[Fleet](https://github.com/SimplyMinimal/fleet-mcp)** - Full Fleet integration for device management, security monitoring, and compliance enforcement. Supports host management, live query execution, policy management, software inventory, vulnerability tracking, and MDM operations. 
Supports Read-Only and Read-Write modes.\n- **[FlightRadar24](https://github.com/sunsetcoder/flightradar24-mcp-server)** - A Claude Desktop MCP server that helps you track flights in real-time using Flightradar24 data.\n- **[Fluent-MCP](https://github.com/modesty/fluent-mcp)** - MCP server for Fluent (ServiceNow SDK) providing access to ServiceNow SDK CLI, API specifications, code snippets, and more.\n- **[Flyworks Avatar](https://github.com/Flyworks-AI/flyworks-mcp)** - Fast and free zeroshot lipsync MCP server.\n- **[fmp-mcp-server](https://github.com/vipbat/fmp-mcp-server)** - Enable your agent for M&A analysis and investment banking workflows. Access company profiles, financial statements, ratios, and perform sector analysis with the Financial Modeling Prep APIs\n- **[FoundationModels](https://github.com/phimage/mcp-foundation-models)** - An MCP server that integrates Apple's [FoundationModels](https://developer.apple.com/documentation/foundationmodels) for text generation.\n- **[Foursquare](https://github.com/foursquare/foursquare-places-mcp)** - Enable your agent to recommend places around the world with the [Foursquare Places API](https://location.foursquare.com/products/places-api/)\n- **[FPE Demo MCP](https://github.com/Horizon-Digital-Engineering/fpe-demo-mcp)** - FF3 Format Preserving Encryption with authentication patterns for secure data protection in LLM workflows.\n- **[FrankfurterMCP](https://github.com/anirbanbasu/frankfurtermcp)** - MCP server acting as an interface to the [Frankfurter API](https://frankfurter.dev/) for currency exchange data.\n- **[freqtrade-mcp](https://github.com/kukapay/freqtrade-mcp)** - An MCP server that integrates with the Freqtrade cryptocurrency trading bot.\n- **[GDAL](https://github.com/Wayfinder-Foundry/gdal-mcp)** - GDAL-style geospatial workflows with built-in reasoning guidance and reference resources to give AI agents catalogue discovery, metadata intelligence, and raster/vector processing.\n- 
**[GDB](https://github.com/pansila/mcp_server_gdb)** - A GDB/MI protocol server based on the MCP protocol, providing remote application debugging capabilities with AI assistants.\n- **[Gemini Bridge](https://github.com/eLyiN/gemini-bridge)** - Lightweight MCP server that enables Claude to interact with Google's Gemini AI through the official CLI, offering zero API costs and stateless architecture.\n- **[Geolocation](https://github.com/jackyang25/geolocation-mcp-server)** - WalkScore API integration for walkability, transit, and bike scores.\n- **[ggRMCP](https://github.com/aalobaidi/ggRMCP)** - A Go gateway that converts gRPC services into MCP-compatible tools, allowing AI models like Claude to directly call your gRPC services.\n- **[Ghost](https://github.com/MFYDev/ghost-mcp)** - A Model Context Protocol (MCP) server for interacting with Ghost CMS through LLM interfaces like Claude.\n- **[Git](https://github.com/geropl/git-mcp-go)** - Allows LLM to interact with a local git repository, incl. optional push support.\n- **[Git Mob](https://github.com/Mubashwer/git-mob-mcp-server)** - MCP server that interfaces with the [git-mob](https://github.com/Mubashwer/git-mob) CLI app for managing co-authors in git commits during pair/mob programming.\n- **[Github](https://github.com/0xshariq/github-mcp-server)** - A Model Context Protocol (MCP) server that provides 29 Git operations + 11 workflow combinations for AI assistants and developers. 
This server exposes comprehensive Git repository management through a standardized interface, enabling AI models and developers to safely manage complex version control workflows.\n- **[GitHub Actions](https://github.com/ko1ynnky/github-actions-mcp-server)** - A Model Context Protocol (MCP) server for interacting with GitHub Actions.\n- **[GitHub Enterprise MCP](https://github.com/ddukbg/github-enterprise-mcp)** - A Model Context Protocol (MCP) server for interacting with GitHub Enterprise.\n- **[GitHub GraphQL](https://github.com/QuentinCody/github-graphql-mcp-server)** - Unofficial GitHub MCP server that provides access to GitHub's GraphQL API, enabling more powerful and flexible queries for repository data, issues, pull requests, and other GitHub resources.\n- **[GitHub Projects](https://github.com/redducklabs/github-projects-mcp)** — Manage GitHub Projects with full GraphQL API access including items, fields, and milestones.\n- **[GitHub Repos Manager MCP Server](https://github.com/kurdin/github-repos-manager-mcp)** - Token-based GitHub automation management. 
No Docker, Flexible configuration, 80+ tools with direct API integration.\n- **[GitMCP](https://github.com/idosal/git-mcp)** - gitmcp.io is a generic remote MCP server to connect to ANY GitHub repository or project documentation effortlessly\n- **[Glean](https://github.com/longyi1207/glean-mcp-server)** - A server that uses Glean API to search and chat.\n- **[Gmail](https://github.com/GongRzhe/Gmail-MCP-Server)** - A Model Context Protocol (MCP) server for Gmail integration in Claude Desktop with auto authentication support.\n- **[Gmail](https://github.com/Ayush-k-Shukla/gmail-mcp-server)** - A Simple MCP server for Gmail with support for all basic operations with oauth2.0.\n- **[Gmail Headless](https://github.com/baryhuang/mcp-headless-gmail)** - Remote hostable MCP server that can get and send Gmail messages without local credential or file system setup.\n- **[Gmail MCP](https://github.com/gangradeamitesh/mcp-google-email)** - A Gmail service implementation using MCP (Model Context Protocol) that provides functionality for sending, receiving, and managing emails through Gmail's API.\n- **[Gnuradio](https://github.com/yoelbassin/gnuradioMCP)** - An MCP server for GNU Radio that enables LLMs to autonomously create and modify RF .grc flowcharts.\n- **[Goal Story](https://github.com/hichana/goalstory-mcp)** - a Goal Tracker and Visualization Tool for personal and professional development.\n- **[GOAT](https://github.com/goat-sdk/goat/tree/main/typescript/examples/by-framework/model-context-protocol)** - Run more than +200 onchain actions on any blockchain including Ethereum, Solana and Base.\n- **[Godot](https://github.com/Coding-Solo/godot-mcp)** - An MCP server providing comprehensive Godot engine integration for project editing, debugging, and scene management.\n- **[Golang Filesystem Server](https://github.com/mark3labs/mcp-filesystem-server)** - Secure file operations with configurable access controls built with Go!\n- 
**[Goodnews](https://github.com/VectorInstitute/mcp-goodnews)** - A simple MCP server that delivers curated positive and uplifting news stories.\n- **[Google Ads](https://github.com/gomarble-ai/google-ads-mcp-server)** - MCP server acting as an interface to the Google Ads, enabling programmatic access to Google Ads data and management features.\n- **[Google Analytics](https://github.com/surendranb/google-analytics-mcp)** - Google Analytics MCP Server to bring data across 200+ dimensions & metrics for LLMs to analyse.\n- **[Google Analytics 4](https://github.com/gomakers-ai/mcp-google-analytics)** - MCP server for Google Analytics Data API and Measurement Protocol to read reports and send events.\n- **[Google Calendar](https://github.com/v-3/google-calendar)** - Integration with Google Calendar to check schedules, find time, and add/delete events\n- **[Google Calendar](https://github.com/nspady/google-calendar-mcp)** - Google Calendar MCP Server for managing Google calendar events. Also supports searching for events by attributes like title and location.\n- **[Google Custom Search](https://github.com/adenot/mcp-google-search)** - Provides Google Search results via the Google Custom Search API\n- **[Google Maps](https://github.com/Mastan1301/google_maps_mcp)** - Provides location results using Google Places API.\n- **[Google Sheets](https://github.com/xing5/mcp-google-sheets)** - Access and edit data in your Google Sheets.\n- **[Google Sheets](https://github.com/rohans2/mcp-google-sheets)** - An MCP Server written in TypeScript to access and edit data in your Google Sheets.\n- **[Google Tasks](https://github.com/zcaceres/gtasks-mcp)** - Google Tasks API Model Context Protocol Server.\n- **[Google Vertex AI Search](https://github.com/ubie-oss/mcp-vertexai-search)** - Provides Google Vertex AI Search results by grounding a Gemini model with your own private data\n- **[Google Workspace](https://github.com/taylorwilsdon/google_workspace_mcp)** - Comprehensive Google 
Workspace MCP with full support for Calendar, Drive, Gmail, and Docs using Streamable HTTP or SSE transport.\n- **[Google-Scholar](https://github.com/JackKuo666/Google-Scholar-MCP-Server)** - Enable AI assistants to search and access Google Scholar papers through a simple MCP interface.\n- **[Google-Scholar](https://github.com/mochow13/google-scholar-mcp)** - An MCP server for Google Scholar written in TypeScript with Streamable HTTP transport, along with a `client` implementations that integrates with the server and interacts with `gemini-2.5-flash`.\n- **[Gopher MCP](https://github.com/cameronrye/gopher-mcp)** - Modern, cross-platform MCP server that enables AI assistants to browse and interact with both Gopher protocol and Gemini protocol resources safely and efficiently.\n- **[Gralio SaaS Database](https://github.com/tymonTe/gralio-mcp)** - Find and compare SaaS products, including data from G2 reviews, Trustpilot, Crunchbase, Linkedin, pricing, features and more, using [Gralio MCP](https://gralio.ai/mcp) server\n- **[GraphQL](https://github.com/drestrepom/mcp_graphql)** - Comprehensive GraphQL API integration that automatically exposes each GraphQL query as a separate tool.\n- **[GraphQL Schema](https://github.com/hannesj/mcp-graphql-schema)** - Allow LLMs to explore large GraphQL schemas without bloating the context.\n- **[Graylog](https://github.com/Pranavj17/mcp-server-graylog)** - Search Graylog logs by absolute/relative timestamps, filter by streams, and debug production issues directly from Claude Desktop.\n- **[Grok-MCP](https://github.com/merterbak/Grok-MCP)** - MCP server for xAI’s API featuring the latest Grok models, image analysis & generation, and web search.\n- **[gx-mcp-server](https://github.com/davidf9999/gx-mcp-server)** - Expose Great Expectations data validation and quality checks as MCP tools for AI agents.\n- **[HackMD](https://github.com/yuna0x0/hackmd-mcp)** (by yuna0x0) - An MCP server for HackMD, a collaborative markdown editor. 
It allows users to create, read, and update documents in HackMD using the Model Context Protocol.\n- **[HAProxy](https://github.com/tuannvm/haproxy-mcp-server)** - A Model Context Protocol (MCP) server for HAProxy implemented in Go, leveraging HAProxy Runtime API.\n- **[Hashing MCP Server](https://github.com/kanad13/MCP-Server-for-Hashing)** - MCP Server with cryptographic hashing functions e.g. SHA256, MD5, etc.\n- **[HDW LinkedIn](https://github.com/horizondatawave/hdw-mcp-server)** - Access to profile data and management of user account with [HorizonDataWave.ai](https://horizondatawave.ai/).\n- **[HeatPump](https://github.com/jiweiqi/heatpump-mcp-server)** — Residential heat-pump sizing & cost-estimation tools by **HeatPumpHQ**.\n- **[Helm Chart CLI](https://github.com/jeff-nasseri/helm-chart-cli-mcp)** - Helm MCP provides a bridge between AI assistants and the Helm package manager for Kubernetes. It allows AI assistants to interact with Helm through natural language requests, executing commands like installing charts, managing repositories, and more.\n- **[Heurist Mesh Agent](https://github.com/heurist-network/heurist-mesh-mcp-server)** - Access specialized web3 AI agents for blockchain analysis, smart contract security, token metrics, and blockchain interactions through the [Heurist Mesh network](https://github.com/heurist-network/heurist-agent-framework/tree/main/mesh).\n- **[HLedger MCP](https://github.com/iiAtlas/hledger-mcp)** - Double entry plain text accounting, right in your LLM! 
This MCP enables comprehensive read, and (optional) write access to your local [HLedger](https://hledger.org/) accounting journals.\n- **[Holaspirit](https://github.com/syucream/holaspirit-mcp-server)** - Interact with [Holaspirit](https://www.holaspirit.com/).\n- **[Home Assistant](https://github.com/tevonsb/homeassistant-mcp)** - Interact with [Home Assistant](https://www.home-assistant.io/) including viewing and controlling lights, switches, sensors, and all other Home Assistant entities.\n- **[Home Assistant](https://github.com/voska/hass-mcp)** - Docker-ready MCP server for Home Assistant with entity management, domain summaries, automation support, and guided conversations. Includes pre-built container images for easy installation.\n- **[HTML to Markdown](https://github.com/levz0r/html-to-markdown-mcp)** - Fetch web pages and convert HTML to clean, formatted Markdown. Handles large pages with automatic file saving to bypass token limits.\n- **[html2md-mcp](https://github.com/sunshad0w/html2md-mcp)** - MCP server for converting HTML to Markdown with browser support and authentication. Reduces HTML size by 90-95% using trafilatura and BeautifulSoup4, with Playwright integration for JavaScript-rendered content.\n- **[HubSpot](https://github.com/buryhuang/mcp-hubspot)** - HubSpot CRM integration for managing contacts and companies. Create and retrieve CRM data directly through Claude chat.\n- **[HuggingFace Spaces](https://github.com/evalstate/mcp-hfspace)** - Server for using HuggingFace Spaces, supporting Open Source Image, Audio, Text Models and more. Claude Desktop mode for easy integration.\n- **[Human-In-the-Loop](https://github.com/GongRzhe/Human-In-the-Loop-MCP-Server)** - A powerful MCP Server that enables AI assistants like Claude to interact with humans through intuitive GUI dialogs. 
This server bridges the gap between automated AI processes and human decision-making by providing real-time user input tools, choices, confirmations, and feedback mechanisms.\n- **[Human-use](https://github.com/RapidataAI/human-use)** - Instant human feedback through an MCP, have your AI interact with humans around the world. Powered by [Rapidata](https://www.rapidata.ai/)\n- **[Hyperledger Fabric Agent Suite](https://github.com/padmarajkore/hlf-fabric-agent)** - Modular toolkit for managing Fabric test networks and chaincode lifecycle via MCP tools.\n- **[Hyperliquid](https://github.com/mektigboy/server-hyperliquid)** - An MCP server implementation that integrates the Hyperliquid SDK for exchange data.\n- **[Hypertool](https://github.com/toolprint/hypertool-mcp)** – MCP that lets you create hot-swappable \"persona toolsets\" from multiple MCP servers to reduce tool overload and improve tool execution.\n- **[hyprmcp](https://github.com/stefanoamorelli/hyprmcp)** (by Stefano Amorelli) - Lightweight MCP server for `hyprland`.\n- **[iFlytek SparkAgent Platform](https://github.com/iflytek/ifly-spark-agent-mcp)** - This is a simple example of using MCP Server to invoke the task chain of the iFlytek SparkAgent Platform.\n- **[iFlytek Workflow](https://github.com/iflytek/ifly-workflow-mcp-server)** - Connect to iFlytek Workflow via the MCP server and run your own Agent.\n- **[IIIF](https://github.com/code4history/IIIF_MCP)** - Comprehensive IIIF (International Image Interoperability Framework) protocol support for searching, navigating, and manipulating digital collections from museums, libraries, and archives worldwide.\n- **[Image Generation](https://github.com/GongRzhe/Image-Generation-MCP-Server)** - This MCP server provides image generation capabilities using the Replicate Flux model.\n- **[ImageSorcery MCP](https://github.com/sunriseapps/imagesorcery-mcp)** - ComputerVision-based 🪄 sorcery of image recognition and editing tools for AI assistants.\n- **[IMAP 
MCP](https://github.com/dominik1001/imap-mcp)** - 📧 An IMAP Model Context Protocol (MCP) server to expose IMAP operations as tools for AI assistants.\n- **[iMCP](https://github.com/loopwork-ai/iMCP)** - A macOS app that provides an MCP server for your iMessage, Reminders, and other Apple services.\n- **[InfluxDB](https://github.com/idoru/influxdb-mcp-server)** - Run queries against InfluxDB OSS API v2.\n- **[Inner Monologue MCP](https://github.com/abhinav-mangla/inner-monologue-mcp)** - A cognitive reasoning tool that enables LLMs to engage in private, structured self-reflection and multi-step reasoning before generating responses, improving response quality and problem-solving capabilities.\n- **[Inoyu](https://github.com/sergehuber/inoyu-mcp-unomi-server)** - Interact with an Apache Unomi CDP customer data platform to retrieve and update customer profiles\n- **[Instagram DM](https://github.com/trypeggy/instagram_dm_mcp)** - Send DMs on Instagram via your LLM\n- **[Intelligent Image Generator](https://github.com/shinpr/mcp-image)** - Turn casual prompts into professional-quality images with AI enhancement\n- **[interactive-mcp](https://github.com/ttommyth/interactive-mcp)** - Enables interactive LLM workflows by adding local user prompts and chat capabilities directly into the MCP loop.\n- **[Intercom](https://github.com/raoulbia-ai/mcp-server-for-intercom)** - An MCP-compliant server for retrieving customer support tickets from Intercom. This tool enables AI assistants like Claude Desktop and Cline to access and analyze your Intercom support tickets.\n- **[iOS Simulator](https://github.com/InditexTech/mcp-server-simulator-ios-idb)** - A Model Context Protocol (MCP) server that enables LLMs to interact with iOS simulators (iPhone, iPad, etc.) through natural language commands.\n- **[ipybox](https://github.com/gradion-ai/ipybox)** - Python code execution sandbox based on IPython and Docker. 
Stateful code execution, file transfer between host and container, configurable network access. See [ipybox MCP server](https://gradion-ai.github.io/ipybox/mcp-server/) for details.\n- **[it-tools-mcp](https://github.com/wrenchpilot/it-tools-mcp)** - A Model Context Protocol server that recreates [CorentinTh it-tools](https://github.com/CorentinTh/it-tools) utilities for AI agents, enabling access to a wide range of developer tools (encoding, decoding, conversions, and more) via MCP.\n- **[itemit MCP](https://github.com/umin-ai/itemit-mcp)** - itemit is an Asset Tracking MCP that manages inventory, monitoring and location tracking, powering 300+ organizations.\n- **[iTerm MCP](https://github.com/ferrislucas/iterm-mcp)** - Integration with iTerm2 terminal emulator for macOS, enabling LLMs to execute and monitor terminal commands.\n- **[iTerm MCP Server](https://github.com/rishabkoul/iTerm-MCP-Server)** - A Model Context Protocol (MCP) server implementation for iTerm2 terminal integration. Able to manage multiple iTerm Sessions.\n- **[Java Decompiler](https://github.com/idachev/mcp-javadc)** - Decompile Java bytecode into readable source code from .class files, package names, or JAR archives using CFR decompiler\n- **[JavaFX](https://github.com/quarkiverse/quarkus-mcp-servers/tree/main/jfx)** - Make drawings using a JavaFX canvas\n- **[JDBC](https://github.com/quarkiverse/quarkus-mcp-servers/tree/main/jdbc)** - Connect to any JDBC-compatible database and query, insert, update, delete, and more. 
Supports MySQL, PostgreSQL, Oracle, SQL Server, SQLite and [more](https://github.com/quarkiverse/quarkus-mcp-servers/tree/main/jdbc#supported-jdbc-variants).\n- **[Jenkins](https://github.com/jasonkylelol/jenkins-mcp-server)** - This MCP server allows you to create Jenkins tasks.\n- **[JMeter](https://github.com/QAInsights/jmeter-mcp-server)** - Run load testing using Apache JMeter via MCP-compliant tools.\n- **[Job Searcher](https://github.com/0xDAEF0F/job-searchoor)** - A FastMCP server that provides tools for retrieving and filtering job listings based on time period, keywords, and remote work preferences.\n- **[jobswithgpt](https://github.com/jobswithgpt/mcp)** - Job search MCP using jobswithgpt which indexes 500K+ public job listings and refreshed continuously.\n- **[joinly](https://github.com/joinly-ai/joinly)** - MCP server to interact with browser-based meeting platforms (Zoom, Teams, Google Meet). Enables AI agents to send bots to online meetings, gather live transcripts, speak text, and send messages in the meeting chat.\n- **[JSON](https://github.com/GongRzhe/JSON-MCP-Server)** - JSON handling and processing server with advanced query capabilities using JSONPath syntax and support for array, string, numeric, and date operations.\n- **[JSON](https://github.com/kehvinbehvin/json-mcp-filter)** - JSON schema generation and filtering server with TypeScript type creation optimised for retrieving relevant context JSON data using quicktype-core and support for shape-based data extraction, nested object filtering, and array processing operations.\n- **[JSON to Excel by WTSolutions](https://github.com/he-yang/json-to-excel-mcp)** - Converting JSON into CSV format string from (1) JSON data, (2) URLs pointing to publicly available .json files.\n- **[JSON2Video MCP](https://github.com/omergocmen/json2video-mcp-server)** - A Model Context Protocol (MCP) server implementation for programmatically generating videos using the json2video API. 
This server exposes powerful video generation and status-checking tools for use with LLMs, agents, or any MCP-compatible client.\n- **[jupiter-mcp](https://github.com/kukapay/jupiter-mcp)** - An MCP server for executing token swaps on the Solana blockchain using Jupiter's new Ultra API.\n- **[Jupyter MCP Server](https://github.com/datalayer/jupyter-mcp-server)** – Real-time interaction with Jupyter Notebooks, allowing AI to edit, document and execute code for data analysis, visualization etc. Compatible with any Jupyter deployment (local, JupyterHub, ...).\n- **[Jupyter Notebook](https://github.com/jjsantos01/jupyter-notebook-mcp)** - connects Jupyter Notebook to Claude AI, allowing Claude to directly interact with and control Jupyter Notebooks. This integration enables AI-assisted code execution, data analysis, visualization, and more.\n- **[k8s-multicluster-mcp](https://github.com/razvanmacovei/k8s-multicluster-mcp)** - An MCP server for interacting with multiple Kubernetes clusters simultaneously using multiple kubeconfig files.\n- **[Kafka](https://github.com/tuannvm/kafka-mcp-server)** - A Model Context Protocol (MCP) server for Apache Kafka implemented in Go, leveraging [franz-go](https://github.com/twmb/franz-go).\n- **[Kafka Schema Registry MCP](https://github.com/aywengo/kafka-schema-reg-mcp)** - A comprehensive MCP server for Kafka Schema Registry with 48 tools, multi-registry support, authentication, and production safety features. 
Enables AI-powered schema management with enterprise-grade capabilities including schema contexts, migration tools, and comprehensive export capabilities.\n- **[kafka-mcp](https://github.com/shivamxtech/kafka-mcp)** - An MCP Server for Kafka clusters to interact with kafka environment via tools on messages, topics, offsets, partitions for consumer and producers along with seamless integration with MCP clients.\n- **[Kaggle-mcp](https://github.com/Seif-Sameh/Kaggle-mcp.git)** - An MCP server that provides seamless integration with the Kaggle API. Interact with Kaggle competitions, datasets, kernels, and models through MCP-compatible clients like Claude Desktop.\n- **[Keycloak](https://github.com/idoyudha/mcp-keycloak)** - The Keycloak MCP Server designed for agentic applications to manage and search data in Keycloak efficiently.\n- **[Keycloak MCP](https://github.com/ChristophEnglisch/keycloak-model-context-protocol)** - This MCP server enables natural language interaction with Keycloak for user and realm management including creating, deleting, and listing users and realms.\n- **[Keycloak MCP Server](https://github.com/sshaaf/keycloak-mcp-server)** - designed to work with Keycloak for identity and access management, with about 40+ tools covering, Users, Realms, Clients, Roles, Groups, IDPs, Authentication. 
Native builds available.\n- **[Kibana MCP](https://github.com/TocharianOU/mcp-server-kibana.git)** (by TocharianOU) - A community-maintained MCP server implementation that allows any MCP-compatible client to access and manage Kibana instances through natural language or programmatic requests.\n- **[Kibela](https://github.com/kiwamizamurai/mcp-kibela-server)** (by kiwamizamurai) - Interact with Kibela API.\n- **[KiCad MCP](https://github.com/lamaalrajih/kicad-mcp)** - MCP server for KiCad on Mac, Windows, and Linux.\n- **[kill-process-mcp](https://github.com/misiektoja/kill-process-mcp)** - List and terminate OS processes via natural language queries\n- **[Kindred Offers & Discounts MCP](https://github.com/kindred-app/mcp-server-kindred-offers)** (by kindred.co) - This MCP server allows you to get live deals and offers/coupons from e-commerce merchant sites all over the world.\n- **[kintone](https://github.com/macrat/mcp-server-kintone)** - Manage records and apps in [kintone](https://kintone.com) through LLM tools.\n- **[KnowAir Weather MCP](https://github.com/shuowang-ai/Weather-MCP)** - A comprehensive Model Context Protocol (MCP) server providing real-time weather data, air quality monitoring, forecasts, and astronomical information powered by Caiyun Weather API.\n- **[Kokoro TTS](https://github.com/mberg/kokoro-tts-mcp)** - Use Kokoro text to speech to convert text to MP3s with optional autoupload to S3.\n- **[Kong Konnect](https://github.com/Kong/mcp-konnect)** - A Model Context Protocol (MCP) server for interacting with Kong Konnect APIs, allowing AI assistants to query and analyze Kong Gateway configurations, traffic, and analytics.\n- **[Korea Stock Analyzer](https://github.com/Mrbaeksang/korea-stock-analyzer-mcp)** - Analyze Korean stocks (KOSPI/KOSDAQ) with 6 legendary investment strategies including Buffett, Lynch, Graham, Greenblatt, Fisher, and Templeton.\n- **[KRS Poland](https://github.com/pkolawa/krs-poland-mcp-server)** - Access to Polish National 
Court Register (KRS)—the government's authoritative registry of all businesses, foundations, and other legal entities.\n- **[Kubeflow Spark History MCP Server](https://github.com/kubeflow/mcp-apache-spark-history-server)** - Enable AI agents to analyze Spark job performance, identify bottlenecks, and provide intelligent insights.\n- **[Kubernetes](https://github.com/Flux159/mcp-server-kubernetes)** - Connect to Kubernetes cluster and manage pods, deployments, and services.\n- **[Kubernetes and OpenShift](https://github.com/manusa/kubernetes-mcp-server)** - A powerful Kubernetes MCP server with additional support for OpenShift. Besides providing CRUD operations for any Kubernetes resource, this server provides specialized tools to interact with your cluster.\n- **[KubeSphere](https://github.com/kubesphere/ks-mcp-server)** - The KubeSphere MCP Server is a Model Context Protocol(MCP) server that provides integration with KubeSphere APIs, enabling to get resources from KubeSphere. Divided into four tools modules: Workspace Management, Cluster Management, User and Roles, Extensions Center.\n- **[Kukapay MCP Servers](https://github.com/kukapay/kukapay-mcp-servers)** - A comprehensive suite of Model Context Protocol (MCP) servers dedicated to cryptocurrency, blockchain, and Web3 data aggregation, analysis, and services from Kukapay.\n- **[kwrds.ai](https://github.com/mkotsollaris/kwrds_ai_mcp)** - Keyword research, people also ask, SERP and other SEO tools for [kwrds.ai](https://www.kwrds.ai/)\n- **[KYC-mcp-server](https://github.com/vishnurudra-ai/KYC-mcp-server)** - Know Your Computer (KYC) - MCP Server compatible with Claude Desktop. 
Comprehensive system diagnostics for Windows, Mac OS and Linux operating system with AI-powered recommendations.\n- **[Langflow MCP Server](https://github.com/nobrainer-tech/langflow-mcp)** - Comprehensive MCP server providing 90 tools for Langflow workflow automation - manage flows, execute workflows, handle builds, and interact with knowledge bases. Includes Docker support and full API coverage for Langflow 1.6.4.\n- **[Langflow-DOC-QA-SERVER](https://github.com/GongRzhe/Langflow-DOC-QA-SERVER)** - A Model Context Protocol server for document Q&A powered by Langflow. It demonstrates core MCP concepts by providing a simple interface to query documents through a Langflow backend.\n- **[Language Server](https://github.com/isaacphi/mcp-language-server)** - MCP Language Server helps MCP enabled clients navigate codebases more easily by giving them access to semantic tools like get definition, references, rename, and diagnostics.\n- **[Large File MCP](https://github.com/willianpinho/large-file-mcp)** - Intelligent handling of large files with smart chunking, navigation, and streaming capabilities. 
Features LRU caching, regex search, and comprehensive file analysis.\n- **[Lark(Feishu)](https://github.com/kone-net/mcp_server_lark)** - A Model Context Protocol(MCP) server for Lark(Feishu) sheet, message, doc and etc.\n- **[Lazy Toggl MCP](https://github.com/movstox/lazy-toggl-mcp)** - Simple unofficial MCP server to track time via Toggl API\n- **[lean-lsp-mcp](https://github.com/oOo0oOo/lean-lsp-mcp)** - Interact with the [Lean theorem prover](https://lean-lang.org/) via the Language Server Protocol.\n- **[librenms-mcp](https://github.com/mhajder/librenms-mcp)** - MCP server for [LibreNMS](https://www.librenms.org/) management\n- **[libvirt-mcp](https://github.com/MatiasVara/libvirt-mcp)** - Allows LLM to interact with libvirt thus enabling to create, destroy or list the Virtual Machines in a system.\n- **[Lightdash](https://github.com/syucream/lightdash-mcp-server)** - Interact with [Lightdash](https://www.lightdash.com/), a BI tool.\n- **[LINE](https://github.com/amornpan/py-mcp-line)** (by amornpan) - Implementation for LINE Bot integration that enables Language Models to read and analyze LINE conversations through a standardized interface. 
Features asynchronous operation, comprehensive logging, webhook event handling, and support for various message types.\n- **[Linear](https://github.com/tacticlaunch/mcp-linear)** - Interact with Linear project management system.\n- **[Linear](https://github.com/jerhadf/linear-mcp-server)** - Allows LLM to interact with Linear's API for project management, including searching, creating, and updating issues.\n- **[Linear (Go)](https://github.com/geropl/linear-mcp-go)** - Allows LLM to interact with Linear's API via a single static binary.\n- **[Linear MCP](https://github.com/anoncam/linear-mcp)** - Full blown implementation of the Linear SDK to support comprehensive Linear management of projects, initiatives, issues, users, teams and states.\n- **[Linked API MCP](https://github.com/Linked-API/linkedapi-mcp)** - MCP server that lets AI assistants control LinkedIn accounts and retrieve real-time data.\n- **[Listmonk MCP Server](https://github.com/rhnvrm/listmonk-mcp)** (by rhnvrm) - Full API coverage of [Listmonk](https://github.com/knadh/listmonk) email marketing FOSS.\n- **[LlamaCloud](https://github.com/run-llama/mcp-server-llamacloud)** (by marcusschiesser) - Integrate the data stored in a managed index on [LlamaCloud](https://cloud.llamaindex.ai/)\n- **[lldb-mcp](https://github.com/stass/lldb-mcp)** - A Model Context Protocol server for LLDB that provides LLM-driven debugging.\n- **[llm-context](https://github.com/cyberchitta/llm-context.py)** - Provides a repo-packing MCP tool with configurable profiles that specify file inclusion/exclusion patterns and optional prompts.\n- **[Local History](https://github.com/xxczaki/local-history-mcp)** – MCP server for accessing VS Code/Cursor's Local History.\n- **[Local RAG](https://github.com/shinpr/mcp-local-rag)** - Lightweight local document search with minimal setup. 
Search across PDF, DOCX, TXT, and Markdown files - no Docker, no external services required.\n- **[Locust](https://github.com/QAInsights/locust-mcp-server)** - Allows running and analyzing Locust tests using MCP compatible clients.\n- **[Loki](https://github.com/scottlepp/loki-mcp)** - Golang based MCP Server to query logs from [Grafana Loki](https://github.com/grafana/loki).\n- **[Loki MCP Server](https://github.com/mo-silent/loki-mcp-server)** - Python based MCP Server for querying and analyzing logs from Grafana Loki with advanced filtering and authentication support.\n- **[LottieFiles](https://github.com/junmer/mcp-server-lottiefiles)** - Searching and retrieving Lottie animations from [LottieFiles](https://lottiefiles.com/)\n- **[lsp-mcp](https://github.com/Tritlo/lsp-mcp)** - Interact with Language Servers using the Language Server Protocol to provide additional context information via hover, code actions and completions.\n- **[Lspace](https://github.com/Lspace-io/lspace-server)** - Turn scattered ChatGPT/Claude/Cursor conversations into persistent, searchable knowledge.\n- **[lucene-mcp-server](https://github.com/VivekKumarNeu/MCP-Lucene-Server)** - spring boot server using Lucene for fast document search and management.\n- **[lucid-mcp-server](https://github.com/smartzan63/lucid-mcp-server)** – An MCP server for Lucidchart and Lucidspark: connect, search, and obtain text representations of your Lucid documents and diagrams via LLM-driven AI Vision analysis. [npm](https://www.npmjs.com/package/lucid-mcp-server)\n- **[LunarCrush Remote MCP](https://github.com/lunarcrush/mcp-server)** - Get the latest social metrics and posts for both current live social context as well as historical metrics in LLM and token optimized outputs. 
Ideal for automated trading / financial advisory.\n- **[mac-messages-mcp](https://github.com/carterlasalle/mac_messages_mcp)** - An MCP server that securely interfaces with your iMessage database via the Model Context Protocol (MCP), allowing LLMs to query and analyze iMessage conversations. It includes robust phone number validation, attachment processing, contact management, group chat handling, and full support for sending and receiving messages.\n- **[Maestro MCP](https://github.com/maestro-org/maestro-mcp)** - An MCP server for interacting with Bitcoin via the Maestro RPC API.\n- **[Magg: The MCP Aggregator](https://github.com/sitbon/magg)** - A meta-MCP server that acts as a universal hub, allowing LLMs to autonomously discover, install, and orchestrate multiple MCP servers - essentially giving AI assistants the power to extend their own capabilities on-demand. Includes `mbro`, a powerful CLI MCP server browser with scripting capability.\n- **[Mailchimp MCP](https://github.com/AgentX-ai/mailchimp-mcp)** - Allows AI agents to interact with the Mailchimp API (read-only)\n- **[MailNet](https://github.com/Astroa7m/MailNet-MCP-Server)** - Unified Gmail + Outlook MCP server with agentic orchestration, automatic token refresh, standardized base class for new providers, and dedicated email settings endpoints for tone, signature, and thread-aware replies.\n- **[MalwareBazaar_MCP](https://github.com/mytechnotalent/MalwareBazaar_MCP)** (by Kevin Thomas) - An AI-driven MCP server that autonomously interfaces with MalwareBazaar, delivering real-time threat intel and sample metadata for authorized cybersecurity research workflows.\n- **[man-mcp-server](https://github.com/guyru/man-mcp-server)** - MCP to search and access man pages on the local machine.\n- **[Mandoline](https://github.com/mandoline-ai/mandoline-mcp-server)** - Enable AI assistants to reflect on, critique, and continuously improve their own performance using Mandoline's evaluation framework.\n- 
**[MariaDB](https://github.com/abel9851/mcp-server-mariadb)** - MariaDB database integration with configurable access controls in Python.\n- **[Markdown2doc](https://github.com/Klavis-AI/klavis/tree/main/mcp_servers/pandoc)** - Convert between various file formats using Pandoc\n- **[Markdownify](https://github.com/zcaceres/mcp-markdownify-server)** - MCP to convert almost anything to Markdown (PPTX, HTML, PDF, Youtube Transcripts and more)\n- **[market-fiyati](https://github.com/mtcnbzks/market-fiyati-mcp-server)** - The MCP server for marketfiyati.org.tr, offering grocery price search and comparison across Turkish markets.\n- **[Markitdown](https://github.com/Klavis-AI/klavis/tree/main/mcp_servers/markitdown)** - Convert files to Markdown\n- **[Masquerade](https://github.com/postralai/masquerade)** - Redact sensitive information from your PDF documents before sending them to Claude. Masquerade serves as a privacy firewall for LLMs.\n- **[MasterGo](https://github.com/mastergo-design/mastergo-magic-mcp)** - The server designed to connect MasterGo design tools with AI models. It enables AI models to directly retrieve DSL data from MasterGo design files.\n- **[Matlab-MCP-Tools](https://github.com/neuromechanist/matlab-mcp-tools)** - An MCP to write and execute MATLAB scripts, maintain workspace context between MCP calls, visualize plots, and perform section-by-section analysis of MATLAB code with full access to MATLAB's computational capabilities.\n- **[Maton](https://github.com/maton-ai/agent-toolkit/tree/main/modelcontextprotocol)** - Connect to your SaaS tools like HubSpot, Salesforce, and more.\n- **[Matrix](https://github.com/mjknowles/matrix-mcp-server)** - Interact with a Matrix homeserver.\n- **[Maven Tools MCP](https://github.com/arvindand/maven-tools-mcp)** - Maven Central dependency intelligence for JVM build tools. 
Supports all build tools (Maven, Gradle, SBT, Mill) with Context7 integration for documentation support.\n- **[Maybe Don't AI Policy Engine](https://www.maybedont.ai/download/)** - Yet another MCP security gateway, Maybe Don't AI provides policy checks on any call before it reaches downstream MCP servers to protect users from agents behaving poorly.\n- **[MCP Bundles Hub](https://github.com/thinkchainai/mcpbundles)** - Discover, install, and manage 500+ MCP provider integrations and bundles through [MCP Bundles](https://mcpbundles.com).\n- **[MCP Compass](https://github.com/liuyoshio/mcp-compass)** - Suggest the right MCP server for your needs\n- **[MCP Context Provider](https://github.com/doobidoo/MCP-Context-Provider)** - Static server that provides AI models with persistent tool-specific context and rules, preventing context loss between chat sessions and enabling consistent behavior across interactions.\n- **[MCP Create](https://github.com/tesla0225/mcp-create)** - A dynamic MCP server management service that creates, runs, and manages Model Context Protocol servers on-the-fly.\n- **[MCP Documentation Server](https://github.com/andrea9293/mcp-documentation-server)** - Server that provides local-first document management and semantic search via embeddings or Gemini AI (recommended). Optimized for performance with disk persistence, an in-memory index, and caching.\n- **[MCP Dynamic Tool Groups](https://github.com/ECF/MCPToolGroups)** - Example MCP servers that use [annotated](https://github.com/spring-ai-community/mcp-annotations) Java interfaces/classes as 'tool groups'.  Using standard MCP annotations, service implementations can then, at runtime, be used to generate tool specifications, and then dynamically added or removed from MCP servers.   
The functionality is demonstrated in a sample tool group, but can be similarly used for any API or service.\n- **[MCP Installer](https://github.com/anaisbetts/mcp-installer)** - This server is a server that installs other MCP servers for you.\n- **[MCP on Android TV](https://github.com/MiddlePoint-Solutions/mcp-on-android-tv)** - A Model Context Protocol (MCP) server running directly on your Android TV with bundled access to ADB on-device.\n- **[MCP OpenProject Server](https://github.com/boma086/mcp-openproject)** - Comprehensive MCP server for OpenProject integration with GitHub installation, CLI tools, and support for multiple AI assistants including Claude Code and Windsurf.\n- **[MCP ProjectManage OpenProject](https://github.com/boma086/mcp-projectmanage-openproject)** - This server provides the MCP service for project weekly reports, with project management information supplied by OpenProject.\n- **[MCP Proxy Server](https://github.com/TBXark/mcp-proxy)** - An MCP proxy server that aggregates and serves multiple MCP resource servers through a single HTTP server.\n- **[MCP Server Creator](https://github.com/GongRzhe/MCP-Server-Creator)** - A powerful Model Context Protocol (MCP) server that creates other MCP servers! This meta-server provides tools for dynamically generating FastMCP server configurations and Python code.\n- **[MCP Server Generator](https://github.com/SerhatUzbas/mcp-server-generator)** - An MCP server that creates and manages MCP servers! Helps both non-technical users and developers build custom JavaScript MCP servers with AI guidance, automatic dependency management, and Claude Desktop integration.\n- **[MCP STDIO to Streamable HTTP Adapter](https://github.com/pyroprompts/mcp-stdio-to-streamable-http-adapter)** - Connect to Streamable HTTP MCP Servers even if the MCP Client only supports STDIO.\n- **[MCP Toolz](https://github.com/taylorleese/mcp-toolz)** - Context management, todo persistence, and AI second opinions for Claude Code. 
Save and restore contexts, code snippets, and todo lists across sessions and get feedback from ChatGPT, Claude, Gemini, and DeepSeek.\n- **[MCP-Airflow-API](https://github.com/call518/MCP-Airflow-API)** - Model Context Protocol (MCP) server for Apache Airflow API integration. Provides comprehensive tools for managing Airflow clusters including service operations, configuration management, status monitoring, and request tracking.\n- **[MCP-Ambari-API](https://github.com/call518/MCP-Ambari-API)** - Model Context Protocol (MCP) server for Apache Ambari API integration. This project provides tools for managing Hadoop clusters, including service operations, configuration management, status monitoring, and request tracking.\n- **[mcp-containerd](https://github.com/jokemanfire/mcp-containerd)** - The containerd MCP implemented by Rust supports the operation of the CRI interface.\n- **[MCP-Database-Server](https://github.com/executeautomation/mcp-database-server)** - Fastest way to interact with your Database such as SQL Server, SQLite and PostgreSQL\n- **[mcp-grep](https://github.com/erniebrodeur/mcp-grep)** - Python-based MCP server that brings grep functionality to LLMs. Supports common grep features including pattern searching, case-insensitive matching, context lines, and recursive directory searches.\n- **[mcp-k8s-go](https://github.com/strowk/mcp-k8s-go)** - Golang-based Kubernetes server for MCP to browse pods and their logs, events, namespaces and more. 
Built to be extensible.\n- **[mcp-local-rag](https://github.com/nkapila6/mcp-local-rag)** - \"primitive\" RAG-like web search model context protocol (MCP) server that runs locally using Google's MediaPipe Text Embedder and DuckDuckGo Search.\n- **[mcp-mcp](https://github.com/wojtyniak/mcp-mcp)** - Meta-MCP Server that acts as a tool discovery service for MCP clients.\n- **[mcp-meme-sticky](https://github.com/nkapila6/mcp-meme-sticky)** - Make memes or stickers using MCP server for WhatsApp or Telegram.\n- **[mcp-memory-service](https://github.com/doobidoo/mcp-memory-service)** - Universal MCP memory service providing semantic memory search, persistent storage, and autonomous memory consolidation for AI assistants across 13+ AI applications.\n- **[mcp-n8n](https://github.com/gomakers-ai/mcp-n8n)** - Complete n8n API integration with 41 tools for workflow management, execution monitoring, credentials, and 100+ pre-built templates. Control your entire n8n automation infrastructure through AI conversations.\n- **[MCP-NixOS](https://github.com/utensils/mcp-nixos)** - A Model Context Protocol server that provides AI assistants with accurate, real-time information about NixOS packages, system options, Home Manager settings, and nix-darwin macOS configurations.\n- **[mcp-notify](https://github.com/aahl/mcp-notify)** - An MCP server for message push, supporting Weixin, DingTalk, Telegram, Bark, Lark, Feishu, and Home Assistant.\n- **[mcp-open-library](https://github.com/8enSmith/mcp-open-library)** - A Model Context Protocol (MCP) server for the Open Library API that enables AI assistants to search for book and author information.\n- **[MCP-OpenStack-Ops](https://github.com/call518/MCP-OpenStack-Ops)** - Professional OpenStack operations automation via MCP server. Specialized tools for cluster monitoring, instance management, volume control & network analysis. FastMCP + OpenStack SDK + Bearer auth. Claude Desktop ready. 
Perfect for DevOps & cloud automation.\n- **[MCP-PostgreSQL-Ops](https://github.com/call518/MCP-PostgreSQL-Ops)** - Model Context Protocol (MCP) server for PostgreSQL operations. This project provides tools for managing PostgreSQL databases, including database administration, configuration management, and status monitoring.\n- **[mcp-proxy](https://github.com/sparfenyuk/mcp-proxy)** - Connect to MCP servers that run on SSE transport, or expose stdio servers as an SSE server.\n- **[mcp-proxy](https://github.com/mikluko/mcp-proxy)** - Lightweight proxy that handles OAuth 2.0/PKCE authentication and token management for MCP clients lacking native OAuth support.\n- **[mcp-read-website-fast](https://github.com/just-every/mcp-read-website-fast)** - Fast, token-efficient web content extraction that converts websites to clean Markdown. Features Mozilla Readability, smart caching, polite crawling with robots.txt support, and concurrent fetching with minimal dependencies.\n- **[mcp-salesforce](https://github.com/lciesielski/mcp-salesforce-example)** - MCP server with basic demonstration of interactions with your Salesforce instance\n- **[mcp-sanctions](https://github.com/madupay/mcp-sanctions)** - Screen individuals and organizations against global sanctions lists (OFAC, SDN, UN, etc). Query by prompt or document upload.\n- **[mcp-screenshot-website-fast](https://github.com/just-every/mcp-screenshot-website-fast)** - High-quality screenshot capture optimized for Claude Vision API. Automatically tiles full pages into 1072x1072 chunks (1.15 megapixels) with configurable viewports and wait strategies for dynamic content.\n- **[mcp-server-leetcode](https://github.com/doggybee/mcp-server-leetcode)** - Practice and retrieve problems from LeetCode. 
Automate problem retrieval, solutions, and insights for coding practice and competitions.\n- **[Mcp-Swagger-Server](https://github.com/zaizaizhao/mcp-swagger-server)** (by zaizaizhao) - This MCP server transforms OpenAPI specifications into MCP tools, enabling AI assistants to interact with REST APIs through standardized protocol\n- **[mcp-vision](https://github.com/groundlight/mcp-vision)** - An MCP server exposing HuggingFace computer vision models such as zero-shot object detection as tools, enhancing the vision capabilities of large language or vision-language models.\n- **[mcp-weather](https://github.com/TimLukaHorstmann/mcp-weather)** - Accurate weather forecasts via the AccuWeather API (free tier available).\n- **[mcp-youtube-extract](https://github.com/sinjab/mcp_youtube_extract)** - A Model Context Protocol server for YouTube operations, extracting video information and transcripts with intelligent fallback logic. Features comprehensive logging, error handling, and support for both auto-generated and manual transcripts.\n- **[mcp_weather](https://github.com/isdaniel/mcp_weather_server)** - Get weather information from https://api.open-meteo.com API.\n- **[mcpcap](https://github.com/mcpcap/mcpcap)** - A modular Python MCP (Model Context Protocol) Server for analyzing PCAP files.\n- **[MCPfinder](https://github.com/mcpfinder/server)** - The AI Agent's \"App Store\": Discover, install, and monetize AI capabilities — all within the MCP ecosystem.\n- **[MCPIgnore Filesystem](https://github.com/CyberhavenInc/filesystem-mcpignore)** - A Data Security First filesystem MCP server that implements .mcpignore to prevent MCP clients from accessing sensitive data.\n- **[MCPJungle](https://github.com/mcpjungle/MCPJungle)** - Self-hosted MCP Registry and Gateway for enterprise AI Agents\n- **[MCPShell](https://github.com/inercia/mcpshell)** - Tool that allows LLMs to safely execute command-line tools, providing a secure bridge between LLMs and operating system commands.\n- 
**[Md2doc](https://github.com/Yorick-Ryu/md2doc-mcp)** - Convert Markdown text to DOCX format using an external conversion service\n- **[MeasureSpace MCP](https://github.com/MeasureSpace/measure-space-mcp-server)** - A free [Model Context Protocol (MCP) Server](https://smithery.ai/server/@MeasureSpace/measure-space-mcp-server) that provides global weather, climate, air quality forecast and geocoding services by [measurespace.io](https://measurespace.io).\n- **[MediaWiki](https://github.com/ProfessionalWiki/MediaWiki-MCP-Server)** - A Model Context Protocol (MCP) Server that interacts with any MediaWiki wiki\n- **[MediaWiki MCP adapter](https://github.com/lucamauri/MediaWiki-MCP-adapter)** - A custom Model Context Protocol adapter for MediaWiki and WikiBase APIs\n- **[medRxiv](https://github.com/JackKuo666/medRxiv-MCP-Server)** - Enable AI assistants to search and access medRxiv papers through a simple MCP interface.\n- **[mem0-mcp](https://github.com/mem0ai/mem0-mcp)** - A Model Context Protocol server for Mem0, which helps with managing coding preferences.\n- **[Membase](https://github.com/unibaseio/membase-mcp)** - Save and query your agent memory in distributed way by Membase.\n- **[Meme MCP](https://github.com/lidorshimoni/meme-mcp)** - Generate memes via AI using the Imgflip API through the Model Context Protocol.\n- **[memento-mcp](https://github.com/gannonh/memento-mcp)** - Knowledge graph memory system built on Neo4j with semantic search, temporal awareness.\n- **[memos-api-mcp](https://github.com/MemTensor/memos-api-mcp)** - A Model Context Protocol implementation for the API service of [MemOS](https://memos.openmem.net/), a memory management operating system designed for AI applications.\n- **[Meta Ads Remote MCP](https://github.com/pipeboard-co/meta-ads-mcp)** - Remote MCP server to interact with Meta Ads API - access, analyze, and manage Facebook, Instagram, and other Meta platforms advertising campaigns.\n- **[MetaTrader 
MCP](https://github.com/ariadng/metatrader-mcp-server)** - Enable AI LLMs to execute trades using MetaTrader 5 platform.\n- **[Metricool MCP](https://github.com/metricool/mcp-metricool)** - A Model Context Protocol server that integrates with Metricool's social media analytics platform to retrieve performance metrics and schedule content across networks like Instagram, Facebook, Twitter, LinkedIn, TikTok and YouTube.\n- **[Microsoft 365](https://github.com/merill/lokka)** - (by Merill) A Model Context Protocol (MCP) server for Microsoft 365. Includes support for all services including Teams, SharePoint, Exchange, OneDrive, Entra, Intune and more. See [Lokka](https://lokka.dev/) for more details.\n- **[Microsoft 365](https://github.com/softeria/ms-365-mcp-server)** - MCP server that connects to Microsoft Office and the whole Microsoft 365 suite using Graph API (including Outlook/mail, files, Excel, calendar)\n- **[Microsoft 365](https://github.com/pnp/cli-microsoft365-mcp-server)** - Single MCP server that allows to manage many different areas of Microsoft 365, for example: Entra ID, OneDrive, OneNote, Outlook, Planner, Power Apps, Power Automate, Power Platform, SharePoint Embedded, SharePoint Online, Teams, Viva Engage, and many more.\n- **[Microsoft 365 Files (SharePoint/OneDrive)](https://github.com/godwin3737/mcp-server-microsoft365-filesearch)** (by godwin3737) - MCP server with tools to search and get file content from Microsoft 365 including Onedrive and SharePoint. 
Works with Documents (pdf/docx), Presentations, Spreadsheets and Images.\n- **[Microsoft Teams](https://github.com/InditexTech/mcp-teams-server)** - MCP server that integrates Microsoft Teams messaging (read, post, mention, list members and threads)\n- **[Mifos X](https://github.com/openMF/mcp-mifosx)** - An MCP server for the Mifos X Open Source Banking useful for managing clients, loans, savings, shares, financial transactions and generating financial reports.\n- **[Mikrotik](https://github.com/jeff-nasseri/mikrotik-mcp)** - Mikrotik MCP server which cover networking operations (IP, DHCP, Firewall, etc)\n- **[Mindmap](https://github.com/YuChenSSR/mindmap-mcp-server)** (by YuChenSSR) - A server that generates mindmaps from input containing markdown code.\n- **[Minima](https://github.com/dmayboroda/minima)** - MCP server for RAG on local files\n- **[MLflow](https://github.com/kkruglik/mlflow-mcp)** - MLflow MCP server for ML experiment tracking with advanced querying, run comparison, artifact access, and model registry.\n- **[Mobile MCP](https://github.com/mobile-next/mobile-mcp)** (by Mobile Next) - MCP server for Mobile(iOS/Android) automation, app scraping and development using physical devices or simulators/emulators.\n- **[Modao Proto MCP](https://github.com/modao-dev/modao-proto-mcp)** - AI-powered HTML prototype generation server that converts natural language descriptions into complete HTML code with modern design and responsive layouts. 
Supports design description expansion and seamless integration with Modao workspace.\n- **[Monday.com (unofficial)](https://github.com/sakce/mcp-server-monday)** - MCP Server to interact with Monday.com boards and items.\n- **[MongoDB](https://github.com/kiliczsh/mcp-mongo-server)** - A Model Context Protocol Server for MongoDB.\n- **[MongoDB & Mongoose](https://github.com/nabid-pf/mongo-mongoose-mcp)** - MongoDB MCP Server with Mongoose Schema and Validation.\n- **[MongoDB Lens](https://github.com/furey/mongodb-lens)** - Full Featured MCP Server for MongoDB Databases.\n- **[Monzo](https://github.com/BfdCampos/monzo-mcp-bfdcampos)** - Access and manage your Monzo bank accounts through natural language, including balance checking, pot management, transaction listing, and transaction annotation across multiple account types (personal, joint, flex).\n- **[Morningstar](https://github.com/Morningstar/morningstar-mcp-server)** - MCP Server to interact with Morningstar Research, Editorial and Datapoints\n- **[MSSQL](https://github.com/aekanun2020/mcp-server/)** - MSSQL database integration with configurable access controls and schema inspection\n- **[MSSQL](https://github.com/JexinSam/mssql_mcp_server)** (by jexin) - MCP Server for MSSQL database in Python\n- **[MSSQL-MCP](https://github.com/daobataotie/mssql-mcp)** (by daobataotie) - MSSQL MCP that refer to the official website's SQLite MCP for modifications to adapt to MSSQL\n- **[MSSQL-MCP-Node](https://github.com/mihai-dulgheru/mssql-mcp-node)** (by mihai - dulgheru) – Node.js MCP server for Microsoft SQL Server featuring auto-detected single / multi-database configs, execute-SQL and schema tools, robust Zod validation, and optional Express endpoints for local testing\n- **[MSSQL-Python](https://github.com/amornpan/py-mcp-mssql)** (by amornpan) - A read-only Python implementation for MSSQL database access with enhanced security features, configurable access controls, and schema inspection capabilities. 
Focuses on safe database interaction through Python ecosystem.\n- **[Multi-Model Advisor](https://github.com/YuChenSSR/multi-ai-advisor-mcp)** - A Model Context Protocol (MCP) server that orchestrates queries across multiple Ollama models, synthesizing their insights to deliver a comprehensive and multifaceted AI perspective on any given query.\n- **[Multicluster-MCP-Server](https://github.com/yanmxa/multicluster-mcp-server)** - The gateway for GenAI systems to interact with multiple Kubernetes clusters.\n- **[MySQL](https://github.com/benborla/mcp-server-mysql)** (by benborla) - MySQL database integration in NodeJS with configurable access controls and schema inspection\n- **[MySQL](https://github.com/designcomputer/mysql_mcp_server)** (by DesignComputer) - MySQL database integration in Python with configurable access controls and schema inspection\n- **[MySQL-Server](https://github.com/tonycai/mcp-mysql-server)** (by TonyCai) - MySQL Database Integration using Python script with configurable access controls and schema inspection, using stdio mode suitable for local deployment; you can run it in a docker container.\n- **[n8n](https://github.com/leonardsellem/n8n-mcp-server)** - This MCP server provides tools and resources for AI assistants to manage n8n workflows and executions, including listing, creating, updating, and deleting workflows, as well as monitoring their execution status.\n- **[Nacos MCP Router](https://github.com/nacos-group/nacos-mcp-router)** - This MCP (Model Context Protocol) Server provides tools to search, install, proxy other MCP servers.\n- **[Nanana](https://github.com/nanana-app/mcp-server-nano-banana)** - This MCP provides an AI text-to-image generator and an AI image-to-image editor powered by Google Gemini Nano Banana.\n- **[NASA](https://github.com/ProgramComputer/NASA-MCP-server)** (by ProgramComputer) - Access to a unified gateway of NASA's data sources including but not limited to APOD, NEO, EPIC, GIBS.\n- **[NASA Image MCP 
Server](https://github.com/adithya1012/NASA-MCP-Server/blob/main/README.md)** - MCP server providing access to NASA's visual data APIs including Mars Rover photos, Earth satellite imagery (EPIC/GIBS), and Astronomy picture of the day. Features built-in image analysis tools with automatic format detection, compression, and base64 conversion for LLM integration.\n- **[NASA Planetary Data System (PDS) MCP Server](https://github.com/NASA-PDS/pds-mcp-server)** - MCP server for connecting to NASA's Planetary Data System (PDS) enabling intelligent data discovery of all of NASA's data products from the 1960s to present day.\n- **[Nasdaq Data Link](https://github.com/stefanoamorelli/nasdaq-data-link-mcp)** (by stefanoamorelli) - An MCP server to access, explore, and interact with Nasdaq Data Link's extensive and valuable financial and economic datasets.\n- **[National Parks](https://github.com/KyrieTangSheng/mcp-server-nationalparks)** - The server provides latest information of park details, alerts, visitor centers, campgrounds, hiking trails, and events for U.S. 
National Parks.\n- **[NAVER](https://github.com/pfldy2850/py-mcp-naver)** (by pfldy2850) - This MCP server provides tools to interact with various Naver services, such as searching blogs, news, books, and more.\n- **[Naver](https://github.com/isnow890/naver-search-mcp)** (by isnow890) - MCP server for Naver Search API integration, supporting blog, news, shopping search and DataLab analytics features.\n- **[NBA](https://github.com/Taidgh-Robinson/nba-mcp-server)** - This MCP server provides tools to fetch recent and historical NBA games including basic and advanced statistics.\n- **[NCI GDC](https://github.com/QuentinCody/nci-gdc-mcp-server)** - Unofficial MCP server for the National Cancer Institute's Genomic Data Commons (GDC), providing access to harmonized cancer genomic and clinical data for oncology research.\n- **[NCP](https://github.com/portel-dev/ncp)** (Natural Context Provider by portel.dev) - NCP lets your AI dream of a tool and articulate its need as a user story. NCP then intelligently discovers and makes that tool instantly available, streamlining thought processes, eliminating cognitive overload, and slashing token costs by up to 87% (47ms discovery). 
Experience true on-demand tool access, smart health monitoring, and energy efficiency for your AI agents.\n- **[Neo4j](https://github.com/da-okazaki/mcp-neo4j-server)** - A community built server that interacts with Neo4j Graph Database.\n- **[Neovim](https://github.com/bigcodegen/mcp-neovim-server)** - An MCP Server for your Neovim session.\n- **[Netbird](https://github.com/aantti/mcp-netbird)** - List and analyze Netbird network peers, groups, policies, and more.\n- **[NetMind ParsePro](https://github.com/protagolabs/Netmind-Parse-PDF-MCP)** - The PDF Parser AI service, built and customized by the [NetMind](https://www.netmind.ai/) team.\n- **[NetSuite](https://github.com/dsvantien/netsuite-mcp-server)** - MCP server for NetSuite ERP integration with OAuth 2.0 authentication, enabling natural language access to NetSuite data through SuiteQL queries, reports, saved searches, and REST API operations.\n- **[Nikto MCP](https://github.com/weldpua2008/nikto-mcp)** (by weldpua2008) - A secure MCP server that enables AI agents to interact with the Nikto web server scanner (use with npx or docker).\n- **[NocoDB](https://github.com/edwinbernadus/nocodb-mcp-server)** - Read and write access to NocoDB database.\n- **[Node Code Sandbox](https://github.com/alfonsograziano/node-code-sandbox-mcp)** – A Node.js MCP server that spins up isolated Docker-based sandboxes for executing JavaScript snippets with on-the-fly npm dependency installation\n- **[nomad-mcp](https://github.com/kocierik/mcp-nomad)** - A server that provides a set of tools for managing Nomad clusters through the MCP.\n- **[Notion](https://github.com/suekou/mcp-notion-server)** (by suekou) - Interact with Notion API.\n- **[Notion](https://github.com/v-3/notion-server)** (by v-3) - Notion MCP integration. 
Search, Read, Update, and Create pages through Claude chat.\n- **[Notion](https://github.com/njbrake/notion-mcp-server)** (by njbrake) - Fork of official Notion MCP Server that returns markdown representation instead of raw json for efficient token usage\n- **[NPM Plus](https://github.com/shacharsol/js-package-manager-mcp)** - AI-powered JavaScript package management with security scanning, bundle analysis, and intelligent dependency management for MCP-compatible editors.\n- **[NS Travel Information](https://github.com/r-huijts/ns-mcp-server)** - Access Dutch Railways (NS) real-time train travel information and disruptions through the official NS API.\n- **[ntfy-mcp](https://github.com/teddyzxcv/ntfy-mcp)** (by teddyzxcv) - The MCP server that keeps you informed by sending the notification on phone using ntfy\n- **[ntfy-me-mcp](https://github.com/gitmotion/ntfy-me-mcp)** (by gitmotion) - An ntfy MCP server for sending/fetching ntfy notifications to your self-hosted ntfy server from AI Agents 📤 (supports secure token auth & more - use with npx or docker!)\n- **[oatpp-mcp](https://github.com/oatpp/oatpp-mcp)** - C++ MCP integration for Oat++. 
Use [Oat++](https://oatpp.io) to build MCP servers.\n- **[Obsidian Markdown Notes](https://github.com/calclavia/mcp-obsidian)** - Read and search through your Obsidian vault or any directory containing Markdown notes\n- **[Obsidian Notes](https://github.com/Piotr1215/mcp-obsidian)** - Direct file system access to Obsidian vaults with security-first design, advanced search capabilities including MOC (Maps of Content) discovery, and support for obsidian.nvim - no Obsidian app required.\n- **[obsidian-mcp](https://github.com/StevenStavrakis/obsidian-mcp)** - (by Steven Stavrakis) An MCP server for Obsidian.md with tools for searching, reading, writing, and organizing notes.\n- **[OceanBase](https://github.com/yuanoOo/oceanbase_mcp_server)** - (by yuanoOo) A Model Context Protocol (MCP) server that enables secure interaction with OceanBase databases.\n- **[Octocode](https://github.com/bgauryy/octocode-mcp)** - (by Guy Bary) AI-powered developer assistant that enables advanced code research, analysis and discovery across GitHub and NPM realms in realtime\n- **[Odoo](https://github.com/ivnvxd/mcp-server-odoo)** - Connect AI assistants to Odoo ERP systems for business data access and workflow automation.\n- **[Office-PowerPoint-MCP-Server](https://github.com/GongRzhe/Office-PowerPoint-MCP-Server)** - A Model Context Protocol (MCP) server for creating, reading, and manipulating Microsoft PowerPoint documents.\n- **[Office-Visio-MCP-Server](https://github.com/GongRzhe/Office-Visio-MCP-Server)** - A Model Context Protocol (MCP) server for creating, reading, and manipulating Microsoft Visio documents.\n- **[Office-Word-MCP-Server](https://github.com/GongRzhe/Office-Word-MCP-Server)** - A Model Context Protocol (MCP) server for creating, reading, and manipulating Microsoft Word documents.\n- **[Okta](https://github.com/kapilduraphe/okta-mcp-server)** - Interact with Okta API.\n- **[OKX-MCP-Server](https://github.com/memetus/okx-mcp-playground)** - An MCP server provides 
various blockchain data and market price data via the OKX API. The server enables Claude to perform operations like retrieve assets prices, transaction data, account history data and trade instruction data.\n- **[OneCite](https://github.com/HzaCode/OneCite)** - Universal citation management and academic reference toolkit. Generate citations from DOI, arXiv, titles, or URLs in multiple formats (BibTeX, APA, MLA). Supports 7+ literature types and 10+ academic databases with intelligent metadata completion.\n- **[OneNote](https://github.com/rajvirtual/MCP-Servers/tree/master/onenote)** - (by Rajesh Vijay) An MCP server that connects to Microsoft OneNote using the Microsoft Graph API. Reading notebooks, sections, and pages from OneNote,Creating new notebooks, sections, and pages in OneNote.\n- **[Onyx MCP Sandbox](https://github.com/avd1729/Onyx)** – (by Aravind) A secure MCP server that executes code in isolated Docker sandboxes. Supports Python, Java, C, C++, JavaScript, and Rust. Provides the `run_code` tool, enforces CPU/memory limits, includes comprehensive tests, and detailed setup instructions.\n- **[Open Strategy Partners Marketing Tools](https://github.com/open-strategy-partners/osp_marketing_tools)** - Content editing codes, value map, and positioning tools for product marketing.\n- **[Open Targets](https://github.com/QuentinCody/open-targets-mcp-server)** - Unofficial MCP server for the Open Targets Platform, providing access to target-disease associations, drug discovery data, and therapeutic hypothesis generation for biomedical research.\n- **[OpenAI GPT Image](https://github.com/SureScaleAI/openai-gpt-image-mcp)** - OpenAI GPT image generation/editing MCP server.\n- **[OpenAI WebSearch MCP](https://github.com/ConechoAI/openai-websearch-mcp)** - This is a Python-based MCP server that provides OpenAI `web_search` built-in tool.\n- **[OpenAlex.org MCP](https://github.com/drAbreu/alex-mcp)** - Professional MCP server providing ML-powered author disambiguation 
and comprehensive researcher profiles using the OpenAlex database.\n- **[OpenAPI](https://github.com/snaggle-ai/openapi-mcp-server)** - Interact with [OpenAPI](https://www.openapis.org/) APIs.\n- **[OpenAPI AnyApi](https://github.com/baryhuang/mcp-server-any-openapi)** - Interact with large [OpenAPI](https://www.openapis.org/) docs using built-in semantic search for endpoints. Allows for customizing the MCP server prefix.\n- **[OpenAPI Schema](https://github.com/hannesj/mcp-openapi-schema)** - Allow LLMs to explore large [OpenAPI](https://www.openapis.org/) schemas without bloating the context.\n- **[OpenAPI Schema Explorer](https://github.com/kadykov/mcp-openapi-schema-explorer)** - Token-efficient access to local or remote OpenAPI/Swagger specs via MCP Resources.\n- **[OpenCTI](https://github.com/Spathodea-Network/opencti-mcp)** - Interact with OpenCTI platform to retrieve threat intelligence data including reports, indicators, malware and threat actors.\n- **[OpenCV](https://github.com/GongRzhe/opencv-mcp-server)** - An MCP server providing OpenCV computer vision capabilities. 
This allows AI assistants and language models to access powerful computer vision tools.\n- **[OpenDigger MCP Server](https://github.com/X-lab2017/open-digger-mcp-server)** - Model Context Protocol (MCP) server for [OpenDigger](https://open-digger.cn/en/), enabling advanced repository analytics and insights through tools and prompts.\n- **[OpenDota](https://github.com/asusevski/opendota-mcp-server)** - Interact with OpenDota API to retrieve Dota 2 match data, player statistics, and more.\n- **[OpenLink Generic Java Database Connectivity](https://github.com/OpenLinkSoftware/mcp-jdbc-server)** - Generic Database Management System (DBMS) access via Open Database Connectivity (ODBC) Connectors (Drivers)\n- **[OpenLink Generic Open Database Connectivity](https://github.com/OpenLinkSoftware/mcp-odbc-server)** - Generic Database Management System (DBMS) access via Open Database Connectivity (ODBC) Connectors (Drivers)\n- **[OpenLink Generic Python Open Database Connectivity](https://github.com/OpenLinkSoftware/mcp-pyodbc-server)** - Generic Database Management System (DBMS) access via Open Database Connectivity (ODBC) Connectors (Drivers) for PyODBC\n- **[OpenLink Generic SQLAlchemy Object-Relational Database Connectivity for PyODBC](https://github.com/OpenLinkSoftware/mcp-sqlalchemy-server)** - Generic Database Management System (DBMS) access via SQLAlchemy (PyODBC) Connectors (Drivers)\n- **[OpenMetadata](https://github.com/yangkyeongmo/mcp-server-openmetadata)** - MCP Server for OpenMetadata, an open-source metadata management platform.\n- **[OpenNeuro](https://github.com/QuentinCody/open-neuro-mcp-server)** - Unofficial MCP server for OpenNeuro, providing access to open neuroimaging datasets, study metadata, and brain imaging data for neuroscience research and analysis.\n- **[OpenReview](https://github.com/anyakors/openreview-mcp-server)** - An MCP server for [OpenReview](https://openreview.net/) to fetch, read and save manuscripts from AI/ML conferences.\n- 
**[OpenRPC](https://github.com/shanejonas/openrpc-mpc-server)** - Interact with and discover JSON-RPC APIs via [OpenRPC](https://open-rpc.org).\n- **[OpenStack](https://github.com/wangsqly0407/openstack-mcp-server)** - MCP server implementation that provides OpenStack interaction.\n- **[OpenWeather](https://github.com/mschneider82/mcp-openweather)** - Interact with the free openweathermap API to get the current and forecast weather for a location.\n- **[OpenZIM MCP](https://github.com/cameronrye/openzim-mcp)** - Modern, secure, and high-performance MCP server that enables AI models to access and search ZIM format knowledge bases offline, including Wikipedia and educational content archives.\n- **[Operative WebEvalAgent](https://github.com/Operative-Sh/web-eval-agent)** (by [Operative.sh](https://www.operative.sh)) - An MCP server to test, debug, and fix web applications autonomously.\n- **[OPNSense MCP](https://github.com/vespo92/OPNSenseMCP)** - MCP Server for OPNSense Firewall Management and API access\n- **[Optimade MCP](https://github.com/dianfengxiaobo/optimade-mcp-server)** - An MCP server conducts real-time material science data queries with the Optimade database (for example, elemental composition, crystal structure).\n- **[Oracle](https://github.com/marcelo-ochoa/servers)** (by marcelo-ochoa) - Oracle Database integration in NodeJS with configurable access controls, query explain, stats and schema inspection\n- **[Oracle Cloud Infrastructure (OCI)](https://github.com/karthiksuku/oci-mcp)** (by karthiksukumar) - Python MCP server for OCI infrastructure (Compute, Autonomous Database, Object Storage). Read-heavy by default with safe instance actions (start/stop/reset). 
Includes Claude Desktop config and `.env` compartment scoping.\n- **[Oura MCP server](https://github.com/tomekkorbak/oura-mcp-server)** - MCP server for Oura API to retrieve one's sleep data\n- **[Oura Ring](https://github.com/rajvirtual/oura-mcp-server)** (by Rajesh Vijay) - MCP Server to access and analyze your Oura Ring data. It provides a structured way to fetch and understand your health metrics.\n- **[Outline](https://github.com/Vortiago/mcp-outline)** - MCP Server to interact with [Outline](https://www.getoutline.com) knowledge base to search, read, create, and manage documents and their content, access collections, add comments, and manage document backlinks.\n- **[Outlook Mail + Calendar + OneDrive](https://github.com/Norcim133/OutlookMCPServer)** - Virtual assistant with Outlook Mail, Calendar, and early OneDrive support (requires Azure admin).\n- **[Pacman](https://github.com/oborchers/mcp-server-pacman)** - An MCP server that provides package index querying capabilities. This server is able to search and retrieve information from package repositories like PyPI, npm, crates.io, Docker Hub, and Terraform Registry.\n- **[pancakeswap-poolspy-mcp](https://github.com/kukapay/pancakeswap-poolspy-mcp)** - An MCP server that tracks newly created liquidity pools on Pancake Swap.\n- **[Pandoc](https://github.com/vivekVells/mcp-pandoc)** - MCP server for seamless document format conversion using Pandoc, supporting Markdown, HTML, PDF, DOCX (.docx), csv and more.\n- **[Paradex MCP](https://github.com/sv/mcp-paradex-py)** - MCP native server for interacting with Paradex platform, including fully featured trading.\n- **[Parliament MCP](https://github.com/i-dot-ai/parliament-mcp)** - MCP server for querying UK parliamentary data.\n- **[PDF reader MCP](https://github.com/gpetraroli/mcp_pdf_reader)** - MCP server to read and search text in a local PDF file.\n- **[PDF Tools MCP](https://github.com/Sohaib-2/pdf-mcp-server)** - 
Comprehensive PDF manipulation toolkit (merge, split, encrypt, optimize and much more)\n- **[PDMT](https://github.com/paiml/pdmt)** - Pragmatic Deterministic MCP Templating - High-performance deterministic templating library with comprehensive todo validation, quality enforcement, and 0.0 temperature generation for reproducible outputs.\n- **[Peacock for VS Code](https://github.com/johnpapa/peacock-mcp)** - MCP Server for the Peacock extension for VS Code, coloring your world, one Code editor at a time. The main goal of the project is to show how an MCP server can be used to interact with APIs.\n- **[persistproc](https://github.com/irskep/persistproc)** - MCP server + command line tool that allows agents to see & control long-running processes like web servers.\n- **[Pexels](https://github.com/garylab/pexels-mcp-server)** - A MCP server providing access to Pexels Free Image API, enabling seamless search, retrieval, and download of high-quality royalty-free images.\n- **[pgtuner_mcp](https://github.com/isdaniel/pgtuner_mcp)** - provides AI-powered PostgreSQL performance tuning capabilities.\n- **[Pharos](https://github.com/QuentinCody/pharos-mcp-server)** - Unofficial MCP server for the Pharos database by the National Center for Advancing Translational Sciences (NCATS), providing access to target, drug, and disease information for drug discovery research.\n- **[Phone MCP](https://github.com/hao-cyber/phone-mcp)** - 📱 A powerful plugin that lets you control your Android phone. 
Enables AI agents to perform complex tasks like automatically playing music based on weather or making calls and sending texts.\n- **[PIF](https://github.com/hungryrobot1/MCP-PIF)** - A Personal Intelligence Framework (PIF), providing tools for file operations, structured reasoning, and journal-based documentation to support continuity and evolving human-AI collaboration across sessions.\n- **[Pinecone](https://github.com/sirmews/mcp-pinecone)** - MCP server for searching and uploading records to Pinecone. Allows for simple RAG features, leveraging Pinecone's Inference API.\n- **[Pinner MCP](https://github.com/safedep/pinner-mcp)** - An MCP server for pinning GitHub Actions and container base images to their immutable SHA hashes to prevent supply chain attacks.\n- **[Pixelle MCP](https://github.com/AIDC-AI/Pixelle-MCP)** - An omnimodal AIGC framework that seamlessly converts ComfyUI workflows into MCP tools with zero code, enabling full-modal support for Text, Image, Sound, and Video generation with Chainlit-based web interface.\n- **[Placid.app](https://github.com/felores/placid-mcp-server)** - Generate image and video creatives using Placid.app templates\n- **[Plane](https://github.com/kelvin6365/plane-mcp-server)** - This MCP Server will help you to manage projects and issues through Plane's API\n- **[Playwright](https://github.com/executeautomation/mcp-playwright)** - This MCP Server will help you run browser automation and webscraping using Playwright\n- **[Playwright Wizard](https://github.com/oguzc/playwright-wizard-mcp)** - Step-by-step wizard for generating Playwright E2E tests with best practices.\n- **[Podbean](https://github.com/amurshak/podbeanMCP)** - MCP server for managing your podcasts, episodes, and analytics through the Podbean API. 
Allows for updating, adding, deleting podcasts, querying show description, notes, analytics, and more.\n- **[Polarsteps](https://github.com/remuzel/polarsteps-mcp)** - An MCP server to help you review your previous Trips and plan new ones!\n- **[PostgreSQL](https://github.com/ahmedmustahid/postgres-mcp-server)** - A PostgreSQL MCP server offering dual HTTP/Stdio transports for database schema inspection and read-only query execution with session management and Podman(or Docker) support.\n- **[Postman](https://github.com/shannonlal/mcp-postman)** - MCP server for running Postman Collections locally via Newman. Allows for simple execution of Postman Server and returns the results of whether the collection passed all the tests.\n- **[Powerdrill](https://github.com/powerdrillai/powerdrill-mcp)** - Interact with Powerdrill datasets, authenticated with [Powerdrill](https://powerdrill.ai) User ID and Project API Key.\n- **[predictive-maintenance-mcp](https://github.com/LGDiMaggio/predictive-maintenance-mcp)** - AI-powered predictive maintenance and fault diagnosis. Features vibration analysis, bearing diagnostics, ISO 20816-3. 
compliance, and ML anomaly detection for industrial machinery.\n- **[Prefect](https://github.com/allen-munsch/mcp-prefect)** - MCP Server for workflow orchestration and ELT/ETL with Prefect Server, and Prefect Cloud [https://www.prefect.io/] using the `prefect` python client.\n- **[Producer Pal](https://github.com/adamjmurray/producer-pal)** - MCP server for controlling Ableton Live, embedded in a Max for Live device for easy drag and drop installation.\n- **[Productboard](https://github.com/kenjihikmatullah/productboard-mcp)** - Integrate the Productboard API into agentic workflows via MCP.\n- **[Prometheus](https://github.com/pab1it0/prometheus-mcp-server)** - Query and analyze Prometheus - open-source monitoring system.\n- **[Prometheus (Golang)](https://github.com/tjhop/prometheus-mcp-server/)** - A Prometheus MCP server with full API support for comprehensive management and deep interaction with Prometheus beyond basic query support. Written in go, it is a single binary install that is capable of STDIO, SSE, and HTTP transports for complex deployments. \n- **[Prometheus (TypeScript)](https://github.com/yanmxa/prometheus-mcp-server)** - Enable AI assistants to query Prometheus using natural language with TypeScript implementation.\n- **[PubChem](https://github.com/sssjiang/pubchem_mcp_server)** - extract drug information from pubchem API.\n- **[PubMed](https://github.com/JackKuo666/PubMed-MCP-Server)** - Enable AI assistants to search, access, and analyze PubMed articles through a simple MCP interface.\n- **[Pulumi](https://github.com/dogukanakkaya/pulumi-mcp-server)** - MCP Server to Interact with Pulumi API, creates and lists Stacks\n- **[Puppeteer vision](https://github.com/djannot/puppeteer-vision-mcp)** - Use Puppeteer to browse a webpage and return a high quality Markdown. 
Use AI vision capabilities to handle cookies, captchas, and other interactive elements automatically.\n- **[Pushover](https://github.com/ashiknesin/pushover-mcp)** - Send instant notifications to your devices using [Pushover.net](https://pushover.net/)\n- **[py-mcp-qdrant-rag](https://github.com/amornpan/py-mcp-qdrant-rag)** (by amornpan) - A Model Context Protocol server implementation that provides RAG capabilities through Qdrant vector database integration, enabling AI agents to perform semantic search and document retrieval with local or cloud-based embedding generation support across Mac, Linux, and Windows platforms.\n- **[pydantic/pydantic-ai/mcp-run-python](https://github.com/pydantic/pydantic-ai/tree/main/mcp-run-python)** - Run Python code in a secure sandbox via MCP tool calls, powered by Deno and Pyodide\n- **[Python CLI MCP](https://github.com/ofek/pycli-mcp)** - Interact with local Python command line applications.\n- **[qa-use](https://github.com/desplega-ai/qa-use)** - Browser automation and QA testing capabilities. This server integrates with [desplega.ai](https://desplega.ai) to offer automated testing, session monitoring, batch test execution, and intelligent test guidance using the AAA framework.\n- **[QGIS](https://github.com/jjsantos01/qgis_mcp)** - connects QGIS to Claude AI through the MCP. 
This integration enables prompt-assisted project creation, layer loading, code execution, and more.\n- **[Qiniu MCP Server](https://github.com/qiniu/qiniu-mcp-server)** - The Model Context Protocol (MCP) Server built on Qiniu Cloud products supports users in accessing Qiniu Cloud Storage, intelligent multimedia services, and more through this MCP Server within the context of AI large model clients.\n- **[QuantConnect](https://github.com/taylorwilsdon/quantconnect-mcp)** - QuantConnect Algorithmic Trading Platform Orchestration MCP - Agentic LLM Driven Trading Strategy Design, Research & Implementation.\n- **[Quarkus](https://github.com/quarkiverse/quarkus-mcp-servers)** - MCP servers for the Quarkus Java framework.\n- **[QuickChart](https://github.com/GongRzhe/Quickchart-MCP-Server)** - A Model Context Protocol server for generating charts using QuickChart.io\n- **[Qwen_Max](https://github.com/66julienmartin/MCP-server-Qwen_Max)** - A Model Context Protocol (MCP) server implementation for the Qwen models.\n- **[RabbitMQ](https://github.com/kenliao94/mcp-server-rabbitmq)** - The MCP server that interacts with RabbitMQ to publish and consume messages.\n- **[RAE](https://github.com/rae-api-com/rae-mcp)** - MCP Server to connect your preferred model with rae-api.com, the Royal Spanish Academy dictionary\n- **[RAG Local](https://github.com/renl/mcp-rag-local)** - An MCP server for storing and retrieving text passages locally based on their semantic meaning.\n- **[RAG Web Browser](https://github.com/apify/mcp-server-rag-web-browser)** An MCP server for Apify's open-source RAG Web Browser [Actor](https://apify.com/apify/rag-web-browser) to perform web searches, scrape URLs, and return content in Markdown.\n- **[Raindrop.io](https://github.com/hiromitsusasaki/raindrop-io-mcp-server)** - An integration that allows LLMs to interact with Raindrop.io bookmarks using the Model Context Protocol (MCP).\n- **[Random Number](https://github.com/zazencodes/random-number-mcp)** - 
Provides LLMs with essential random generation abilities, built entirely on Python's standard library.\n- **[RCSB PDB](https://github.com/QuentinCody/rcsb-pdb-mcp-server)** - Unofficial MCP server for the Research Collaboratory for Structural Bioinformatics Protein Data Bank (RCSB PDB), providing access to 3D protein structures, experimental data, and structural bioinformatics information.\n- **[Reaper](https://github.com/dschuler36/reaper-mcp-server)** - Interact with your [Reaper](https://www.reaper.fm/) (Digital Audio Workstation) projects.\n- **[Redbee](https://github.com/Tamsi/redbee-mcp)** - Redbee MCP server that provides support for interacting with Redbee API.\n- **[Redfish](https://github.com/nokia/mcp-redfish)** - Redfish MCP server that provides support for interacting with [DMTF Redfish API](https://www.dmtf.org/standards/redfish).\n- **[Redis](https://github.com/GongRzhe/REDIS-MCP-Server)** - Redis database operations and caching microservice server with support for key-value operations, expiration management, and pattern-based key listing.\n- **[Redis](https://github.com/prajwalnayak7/mcp-server-redis)** MCP server to interact with Redis Server, AWS Memory DB, etc for caching or other use-cases where in-memory and key-value based storage is appropriate\n- **[RedNote MCP](https://github.com/ifuryst/rednote-mcp)** - MCP server for accessing RedNote(XiaoHongShu, xhs) content\n- **[Reed Jobs](https://github.com/kld3v/reed_jobs_mcp)** - Search and retrieve job listings from Reed.co.uk.\n- **[Rememberizer AI](https://github.com/skydeckai/mcp-server-rememberizer)** - An MCP server designed for interacting with the Rememberizer data source, facilitating enhanced knowledge retrieval.\n- **[Replicate](https://github.com/deepfates/mcp-replicate)** - Search, run and manage machine learning models on Replicate through a simple tool-based interface. 
Browse models, create predictions, track their status, and handle generated images.\n- **[Resend](https://github.com/Klavis-AI/klavis/tree/main/mcp_servers/resend)** - Send email using Resend services\n- **[Restream](https://github.com/shaktech786/restream-mcp-server)** - Model Context Protocol server for Restream API integration - manage multi-platform live streams, control channels, and access streaming analytics.\n- **[Revit MCP](https://github.com/revit-mcp)** - A service implementing the MCP protocol for Autodesk Revit.\n- **[Rijksmuseum](https://github.com/r-huijts/rijksmuseum-mcp)** - Interface with the Rijksmuseum API to search artworks, retrieve artwork details, access image tiles, and explore user collections.\n- **[Riot Games](https://github.com/jifrozen0110/mcp-riot)** - MCP server for League of Legends – fetch player info, ranks, champion stats, and match history via Riot API.\n- **[Rohlik](https://github.com/tomaspavlin/rohlik-mcp)** - Shop groceries across the Rohlik Group platforms (Rohlik.cz, Knuspr.de, Gurkerl.at, Kifli.hu, Sezamo.ro)\n- **[Rquest](https://github.com/xxxbrian/mcp-rquest)** - An MCP server providing realistic browser-like HTTP request capabilities with accurate TLS/JA3/JA4 fingerprints for bypassing anti-bot measures.\n- **[Rust MCP Filesystem](https://github.com/rust-mcp-stack/rust-mcp-filesystem)** - Fast, asynchronous MCP server for efficient handling of various filesystem operations built with the power of Rust.\n- **[SafetySearch](https://github.com/surabhya/SafetySearch)** - Real-time FDA food safety data: recalls, adverse events, analysis.\n- **[Salesforce MCP](https://github.com/smn2gnt/MCP-Salesforce)** - Interact with Salesforce Data and Metadata\n- **[Salesforce MCP (AiondaDotCom)](https://github.com/AiondaDotCom/mcp-salesforce)** - Universal Salesforce integration with OAuth authentication, smart learning system, comprehensive backup capabilities, and full CRUD operations for any Salesforce org including custom objects 
and fields.\n- **[Salesforce MCP Server](https://github.com/tsmztech/mcp-server-salesforce)** - Comprehensive Salesforce integration with tools for querying records, executing Apex, managing fields/objects, and handling debug logs\n- **[Scanova MCP Server](https://github.com/trycon/scanova-mcp)** - MCP server for creating and managing QR codes using the [Scanova](https://scanova.io) API. Provides tools for generating, managing, and downloading QR codes.\n- **[SchemaCrawler](https://github.com/schemacrawler/SchemaCrawler-MCP-Server-Usage)** - Connect to any relational database, and be able to get valid SQL, and ask questions like what does a certain column prefix mean.\n- **[SchemaFlow](https://github.com/CryptoRadi/schemaflow-mcp-server)** - Real-time PostgreSQL & Supabase database schema access for AI-IDEs via Model Context Protocol. Provides live database context through secure SSE connections with three powerful tools: get_schema, analyze_database, and check_schema_alignment. [SchemaFlow](https://schemaflow.dev)\n- **[Scholarly](https://github.com/adityak74/mcp-scholarly)** - An MCP server to search for scholarly and academic articles.\n- **[scrapling-fetch](https://github.com/cyberchitta/scrapling-fetch-mcp)** - Access text content from bot-protected websites. 
Fetches HTML/markdown from sites with anti-automation measures using Scrapling.\n- **[Screeny](https://github.com/rohanrav/screeny)** - Privacy-first macOS MCP server that provides visual context for AI agents through window screenshots\n- **[ScriptFlow](https://github.com/yanmxa/scriptflow-mcp)** - Transform complex, repetitive AI interactions into persistent, executable scripts with comprehensive script management (add, edit, remove, list, search, execute) and multi-language support (Bash, Python, Node.js, TypeScript).\n- **[SearXNG](https://github.com/ihor-sokoliuk/mcp-searxng)** - A Model Context Protocol Server for [SearXNG](https://docs.searxng.org)\n- **[SearXNG](https://github.com/erhwenkuo/mcp-searxng)** - An MCP server that provides web searching via [SearXNG](https://docs.searxng.org) & retrieves URLs as markdown.\n- **[SearXNG Public](https://github.com/pwilkin/mcp-searxng-public)** - A Model Context Protocol Server for retrieving data from public [SearXNG](https://docs.searxng.org) instances, with fallback support\n- **[SEC EDGAR](https://github.com/stefanoamorelli/sec-edgar-mcp)** - (by Stefano Amorelli) A community Model Context Protocol Server to access financial filings and data through the U.S. Securities and Exchange Commission ([SEC](https://www.sec.gov/)) `Electronic Data Gathering, Analysis, and Retrieval` ([EDGAR](https://www.sec.gov/submit-filings/about-edgar)) database\n- **[SendGrid](https://github.com/recepyavuz0/sendgrid-mcp-server)** - An MCP server to integrate with SendGrid's API, enabling AI assistants (like Claude, ChatGPT, etc.) to send emails, manage templates, and track email statistics.\n- **[SEO MCP](https://github.com/cnych/seo-mcp)** - A free SEO tool MCP (Model Control Protocol) service based on Ahrefs data. Includes features such as backlinks, keyword ideas, and more. 
by [claudemcp](https://www.claudemcp.com/servers/seo-mcp).\n- **[Serper](https://github.com/garylab/serper-mcp-server)** - An MCP server that performs Google searches using [Serper](https://serper.dev).\n- **[ServiceNow](https://github.com/osomai/servicenow-mcp)** - An MCP server to interact with a ServiceNow instance\n- **[ShaderToy](https://github.com/wilsonchenghy/ShaderToy-MCP)** - This MCP server lets LLMs to interact with the ShaderToy API, allowing LLMs to learn from compute shaders examples and enabling them to create complex GLSL shaders that they are previously not capable of.\n- **[ShareSeer](https://github.com/shareseer/shareseer-mcp-server)** - MCP to Access SEC filings, financials & insider trading data in real time using [ShareSeer](https://shareseer.com)\n- **[Shell](https://github.com/sonirico/mcp-shell)** - Give hands to AI. MCP server to run shell commands securely, auditably, and on demand\n- **[Shodan MCP](https://github.com/Hexix23/shodan-mcp)** - MCP server to interact with [Shodan](https://www.shodan.io/)\n- **[Shopify](https://github.com/GeLi2001/shopify-mcp)** - MCP to interact with Shopify API including order, product, customers and so on.\n- **[Shopify Storefront](https://github.com/QuentinCody/shopify-storefront-mcp-server)** - Unofficial MCP server that allows AI agents to discover Shopify storefronts and interact with them to fetch products, collections, and other store data through the Storefront API.\n- **[Simple Loki MCP](https://github.com/ghrud92/simple-loki-mcp)** - A simple MCP server to query Loki logs using logcli.\n- **[Siri Shortcuts](https://github.com/dvcrn/mcp-server-siri-shortcuts)** - MCP to interact with Siri Shortcuts on macOS. 
Exposes all Shortcuts as MCP tools.\n- **[Skyvern](https://github.com/Skyvern-AI/skyvern/tree/main/integrations/mcp)** - MCP to let Claude / Windsurf / Cursor / your LLM control the browser\n- **[Slack](https://github.com/korotovsky/slack-mcp-server)** - The most powerful MCP server for Slack Workspaces. This integration supports both Stdio and SSE transports, proxy settings and does not require any permissions or bots being created or approved by Workspace admins 😏.\n- **[Slack](https://github.com/zencoderai/slack-mcp-server)** - Slack MCP server which supports both stdio and Streamable HTTP transports. Extended from the original Anthropic's implementation which is now [archived](https://github.com/modelcontextprotocol/servers-archived/tree/main/src/slack)\n- **[Slidespeak](https://github.com/SlideSpeak/slidespeak-mcp)** - Create PowerPoint presentations using the [Slidespeak](https://slidespeak.com/) API.\n- **[Smartlead](https://github.com/jean-technologies/smartlead-mcp-server-local)** - MCP to connect to Smartlead. Additional tooling, functionality, and connection to workflow automation platforms also available.\n- **[Snowflake](https://github.com/Snowflake-Labs/mcp)** - Open-source MCP server for Snowflake from official Snowflake-Labs supports prompting Cortex Agents, querying structured & unstructured data, object management, SQL execution, semantic view querying, and more. RBAC, fine-grained CRUD controls, and all authentication methods supported.\n- **[Snowflake](https://github.com/isaacwasserman/mcp-snowflake-server)** - This MCP server enables LLMs to interact with Snowflake databases, allowing for secure and controlled data operations.\n- **[Snowflake Cortex MCP Server](https://github.com/thisisbhanuj/Snowflake-Cortex-MCP-Server)** - This Snowflake MCP server provides tooling for Snowflake Cortex AI features, bringing these capabilities to the MCP ecosystem. When connected to an MCP Client (e.g. 
Claude for Desktop, fast-agent, Agentic Orchestration Framework), users can leverage these Cortex AI features.\n- **[SoccerDataAPI](https://github.com/yeonupark/mcp-soccer-data)** - This MCP server provides real-time football match data based on the SoccerDataAPI.\n- **[Solana Agent Kit](https://github.com/sendaifun/solana-agent-kit/tree/main/examples/agent-kit-mcp-server)** - This MCP server enables LLMs to interact with the Solana blockchain with help of Solana Agent Kit by SendAI, allowing for 40+ protocol actions and growing\n- **[Solr MCP](https://github.com/mjochum64/mcp-solr-search)** - This MCP server offers basic functionality to perform a search on Solr servers.\n- **[Solver](https://github.com/szeider/mcp-solver)** - Solves constraint satisfaction and optimization problems.\n- **[Solvitor](https://github.com/Adeptus-Innovatio/solvitor-mcp)** – Solvitor MCP server provides tools to access reverse engineering tools that help developers extract IDL files from closed-source Solana smart contracts and decompile them.\n- **[Source to Knowledge Base](https://github.com/vezlo/src-to-kb)** - Convert source code repositories into searchable knowledge bases with AI-powered search using GPT-5, intelligent chunking, and OpenAI embeddings for semantic code understanding.\n- **[Sourcerer](https://github.com/st3v3nmw/sourcerer-mcp)** - MCP for semantic code search & navigation that reduces token waste.\n- **[Specbridge](https://github.com/TBosak/specbridge)** - Easily turn your OpenAPI specs into MCP Tools.\n- **[Splunk](https://github.com/jkosik/mcp-server-splunk)** - Golang MCP server for Splunk (lists saved searches, alerts, indexes, macros...). Supports SSE and STDIO.\n- **[Spotify](https://github.com/varunneal/spotify-mcp)** - This MCP allows an LLM to play and use Spotify.\n- **[Spring Initializr](https://github.com/hpalma/springinitializr-mcp)** - This MCP allows an LLM to create Spring Boot projects with custom configurations. 
Instead of manually visiting start.spring.io, you can now ask your AI assistant to generate projects with specific dependencies, Java versions, and project structures.\n- **[Squad AI](https://github.com/the-basilisk-ai/squad-mcp)** – Product‑discovery and strategy platform integration. Create, query and update opportunities, solutions, outcomes, requirements and feedback from any MCP‑aware LLM.\n- **[SSH](https://github.com/AiondaDotCom/mcp-ssh)** - Agent for managing and controlling SSH connections.\n- **[SSH](https://github.com/classfang/ssh-mcp-server)** - An MCP server that can execute SSH commands remotely, upload files, download files, and so on.\n- **[SSH MCP Server](https://github.com/sinjab/mcp_ssh)** - A production-ready Model Context Protocol server for SSH automation with background execution, file transfers, and comprehensive timeout protection. Features structured output, progress tracking, and enterprise-grade testing (87% coverage).\n- **[sslmon](https://github.com/firesh/sslmon-mcp)** - Domain/HTTPS/SSL domain registration information and SSL certificate monitoring capabilities. Query domain registration and expiration information, and SSL certificate information and validity status for any domain.\n- **[STAC](https://github.com/Wayfinder-Foundry/stac-mcp)** - STAC catalog and item search MCP server for rapid geospatial data discovery.\n- **[Standard Korean Dictionary](https://github.com/privetin/stdict)** - Search the dictionary using API\n- **[Star Wars](https://github.com/johnpapa/mcp-starwars)** - MCP Server for the SWAPI Star Wars API. 
The main goal of the project is to show how an MCP server can be used to interact with APIs.\n- **[Starknet MCP Server](https://github.com/mcpdotdirect/starknet-mcp-server)** - A comprehensive MCP server for interacting with the Starknet blockchain, providing tools for querying blockchain data, resolving StarknetIDs, and performing token transfers.\n- **[Starling Bank](https://github.com/domdomegg/starling-bank-mcp)** - View and manage Starling Bank accounts and transactions through the Starling Bank API, including account balance checking and transaction history.\n- **[Starwind UI](https://github.com/Boston343/starwind-ui-mcp/)** - This MCP provides relevant commands, documentation, and other information to allow LLMs to take full advantage of Starwind UI's open source Astro components.\n- **[Stellar](https://github.com/syronlabs/stellar-mcp/)** - This MCP server enables LLMs to interact with the Stellar blockchain to create accounts, check address balances, analyze transactions, view transaction history, mint new assets, interact with smart contracts and much more.\n- **[Stitch AI](https://github.com/StitchAI/stitch-ai-mcp/)** - Knowledge management system for AI agents with memory space creation and retrieval capabilities.\n- **[Stockfish](https://github.com/sonirico/mcp-stockfish)** - MCP server connecting AI systems to Stockfish chess engine\n- **[Storybook](https://github.com/stefanoamorelli/storybook-mcp-server)** (by Stefano Amorelli) - Interact with Storybook component libraries, enabling component discovery, story management, prop inspection, and visual testing across different viewports.\n- **[Strava](https://github.com/r-huijts/strava-mcp)** - Connect to the Strava API to access activity data, athlete profiles, segments, and routes, enabling fitness tracking and analysis with Claude.\n- **[Strava API](https://github.com/tomekkorbak/strava-mcp-server)** - MCP server for Strava API to retrieve one's activities\n- 
**[Stripe](https://github.com/atharvagupta2003/mcp-stripe)** - This MCP allows integration with Stripe for handling payments, customers, and refunds.\n- **[Substack/Medium](https://github.com/jonathan-politzki/mcp-writer-substack)** - Connect Claude to your Substack/Medium writing, enabling semantic search and analysis of your published content.\n- **[System Health](https://github.com/thanhtung0201/mcp-remote-system-health)** - The MCP (Multi-Channel Protocol) System Health Monitoring is a robust, real-time monitoring solution designed to provide comprehensive health metrics and alerts for remote Linux servers.\n- **[SystemSage](https://github.com/Tarusharma1/SystemSage)** - A powerful, cross-platform system management and monitoring tool for Windows, Linux, and macOS.\n- **[Talk To Figma](https://github.com/sonnylazuardi/cursor-talk-to-figma-mcp)** - This MCP server enables LLMs to interact with Figma, allowing them to read and modify designs programmatically.\n- **[Talk To Figma via Claude](https://github.com/gaganmanku96/talk-with-figma-claude)** - MCP server that provides seamless Figma integration specifically for Claude Desktop, enabling design creation, modification, and real-time collaboration through natural language commands.\n- **[TAM MCP Server](https://github.com/gvaibhav/TAM-MCP-Server)** - Market research and business intelligence with TAM/SAM calculations and integration across 8 economic data sources: Alpha Vantage, BLS, Census Bureau, FRED, IMF, Nasdaq Data Link, OECD, and World Bank.\n- **[Tasks](https://github.com/flesler/mcp-tasks)** - An efficient task manager. 
Designed to minimize tool confusion and maximize LLM budget efficiency while providing powerful search, filtering, and organization capabilities across multiple file formats (Markdown, JSON, YAML)\n- **[Tavily search](https://github.com/RamXX/mcp-tavily)** - An MCP server for Tavily's search & news API, with explicit site inclusions/exclusions\n- **[TcpSocketMCP](https://github.com/SpaceyKasey/TcpSocketMCP/)** - A Model Context Protocol (MCP) server that provides raw TCP socket access, enabling AI models to interact directly with network services using raw TCP Sockets. Supports multiple concurrent connections, buffering of response data and triggering automatic responses.\n- **[TeamRetro](https://github.com/adepanges/teamretro-mcp-server)** - This MCP server allows LLMs to interact with TeamRetro, allowing LLMs to manage user, team, team member, retrospective, health check, action, agreement and fetch the reports.\n- **[Telegram](https://github.com/chigwell/telegram-mcp)** - An MCP server that provides paginated chat reading, message retrieval, and message sending capabilities for Telegram through Telethon integration.\n- **[Telegram-Client](https://github.com/chaindead/telegram-mcp)** - A Telegram API bridge that manages user data, dialogs, messages, drafts, read status, and more for seamless interactions.\n- **[Telegram-mcp-server](https://github.com/DLHellMe/telegram-mcp-server)** - Access Telegram channels and groups directly in Claude. 
Features dual-mode operation with API access (100x faster) or web scraping, unlimited post retrieval, and search functionality.\n- **[Template MCP Server](https://github.com/mcpdotdirect/template-mcp-server)** - A CLI tool to create a new Model Context Protocol server project with TypeScript support, dual transport options, and an extensible structure\n- **[Tempo](https://github.com/scottlepp/tempo-mcp-server)** - An MCP server to query traces/spans from [Grafana Tempo](https://github.com/grafana/tempo).\n- **[Tensorboard Query](https://github.com/Alir3z4/tb-query)** - An MCP server for querying and analyzing TensorBoard event files.\n- **[Teradata](https://github.com/arturborycki/mcp-teradata)** - This MCP server enables LLMs to interact with Teradata databases. This MCP Server supports tools and prompts for multi-task data analytics\n- **[Terminal-Control](https://github.com/GongRzhe/terminal-controller-mcp)** - An MCP server that enables secure terminal command execution, directory navigation, and file system operations through a standardized interface.\n- **[Terraform-Cloud](https://github.com/severity1/terraform-cloud-mcp)** - An MCP server that integrates AI assistants with the Terraform Cloud API, allowing you to manage your infrastructure through natural conversation.\n- **[TFT-Match-Analyzer](https://github.com/GeLi2001/tft-mcp-server)** - MCP server for teamfight tactics match history & match details fetching, providing user the detailed context for every match.\n- **[Thales CDSP CAKM MCP Server](https://github.com/sanyambassi/thales-cdsp-cakm-mcp-server)** - An MCP server for the Thales CipherTrust Data Security Platform (CDSP) Cloud Key Management (CAKM) connector. 
This MCP server supports Ms SQL and Oracle databases.\n- **[Thales CDSP CRDP MCP Server](https://github.com/sanyambassi/thales-cdsp-crdp-mcp-server)** - A Model Context Protocol (MCP) server that allows interacting with the CipherTrust RestFul Data Protection (CRDP) data protection service.\n- **[Thales CipherTrust Manager MCP Server](https://github.com/sanyambassi/ciphertrust-manager-mcp-server)** - MCP server for Thales CipherTrust Manager integration, enabling secure key management and cryptographic operations.\n- **[thegraph-mcp](https://github.com/kukapay/thegraph-mcp)** - An MCP server that powers AI agents with indexed blockchain data from The Graph.\n- **[TheHive MCP Server](https://github.com/redwaysecurity/the-hive-mcp-server)** - An MCP server for [TheHive](https://strangebee.com/thehive/) Security Incident Response Platform.\n- **[Things3 MCP](https://github.com/urbanogardun/things3-mcp)** - Things3 task management integration for macOS with comprehensive TODO, project, and tag management.\n- **[Think MCP](https://github.com/Rai220/think-mcp)** - Enhances any agent's reasoning capabilities by integrating the think-tools, as described in [Anthropic's article](https://www.anthropic.com/engineering/claude-think-tool).\n- **[Think Node MCP](https://github.com/abhinav-mangla/think-tool-mcp)** - Enhances any agent's reasoning capabilities by integrating the think-tools, as described in [Anthropic's article](https://www.anthropic.com/engineering/claude-think-tool). 
(Works with Node)\n- **[Ticket-Generator MCP](https://github.com/trycon/ticket-generator-mcp)** - A Model Context Protocol (MCP) server implemented in Streamable HTTP transport that allows AI models to interact with the [Ticket Generator](https://ticket-generator.com/) APIs, enabling fetching active events lists, and generating tickets via 3 different modes.\n- **[Ticketmaster](https://github.com/delorenj/mcp-server-ticketmaster)** - Search for events, venues, and attractions through the Ticketmaster Discovery API\n- **[Ticketmaster MCP Server](https://github.com/mochow13/ticketmaster-mcp-server)** - A Model Context Protocol (MCP) server implemented in Streamable HTTP transport that allows AI models to interact with the Ticketmaster Discovery API, enabling searching events, venues, and attractions.\n- **[TickTick](https://github.com/alexarevalo9/ticktick-mcp-server)** - A Model Context Protocol (MCP) server designed to integrate with the TickTick task management platform, enabling intelligent context-aware task operations and automation.\n- **[Tideways](https://github.com/abuhamza/tideways-mcp-server)** - A Model Context Protocol server that enables AI assistants to query Tideways performance monitoring data and provide conversational performance insights for PHP applications.\n- **[TigerGraph](https://github.com/custom-discoveries/TigerGraph_MCP)** - A community built MCP server that interacts with TigerGraph Graph Database.\n- **[TikTok Ads](https://github.com/AdsMCP/tiktok-ads-mcp-server)** - An MCP server for interacting with TikTok advertising platforms for campaign management, performance analytics, audience targeting, creative management, and custom reporting.\n- **[time-mcp-nuget](https://github.com/domdomegg/time-mcp-nuget)** - Get current UTC time in RFC 3339 format (.NET/NuGet implementation).\n- **[time-mcp-pypi](https://github.com/domdomegg/time-mcp-pypi)** - Get current UTC time in RFC 3339 format (Python/PyPI implementation).\n- 
**[tip.md](https://github.com/tipdotmd#-mcp-server-for-ai-assistants)** - An MCP server that enables AI assistants to interact with tip.md's crypto tipping functionality, allowing agents or supporters to tip registered developers directly from AI chat interfaces.\n- **[TMD Earthquake](https://github.com/amornpan/tmd-earthquake-server-1.0)** - 🌍 Real-time earthquake monitoring from Thai Meteorological Department. Features magnitude filtering, location-based search (Thai/English), today's events tracking, dangerous earthquake alerts, and comprehensive statistics. Covers regional and global seismic activities.\n- **[TMDB](https://github.com/Laksh-star/mcp-server-tmdb)** - This MCP server integrates with The Movie Database (TMDB) API to provide movie information, search capabilities, and recommendations.\n- **[Todoist](https://github.com/abhiz123/todoist-mcp-server)** - Interact with Todoist to manage your tasks.\n- **[Todos](https://github.com/tomelliot/todos-mcp)** - A practical todo list manager to use with your favourite chatbot.\n- **[token-minter-mcp](https://github.com/kukapay/token-minter-mcp)** - An MCP server providing tools for AI agents to mint ERC-20 tokens across multiple blockchains.\n- **[token-revoke-mcp](https://github.com/kukapay/token-revoke-mcp)** - An MCP server for checking and revoking ERC-20 token allowances across multiple blockchains.\n- **[Ton Blockchain MCP](https://github.com/devonmojito/ton-blockchain-mcp)** - An MCP server for interacting with Ton Blockchain.\n- **[Topolograph MCP](https://github.com/Vadims06/topolograph-mcp-server)** – An MCP server that enables LLMs to interact with OSPF and IS-IS protocols and analyze network topologies, query network events, and perform path calculations for OSPF and IS-IS protocols.\n- **[TouchDesigner](https://github.com/8beeeaaat/touchdesigner-mcp)** - An MCP server for TouchDesigner, enabling interaction with TouchDesigner projects, nodes, and parameters.\n- 
**[Transcribe](https://github.com/transcribe-app/mcp-transcribe)** - An MCP server provides fast and reliable transcriptions for audio/video files and voice memos. It allows LLMs to interact with the text content of audio/video file.\n- **[Travel Planner](https://github.com/GongRzhe/TRAVEL-PLANNER-MCP-Server)** - Travel planning and itinerary management server integrating with Google Maps API for location search, place details, and route calculations.\n- **[Trello MCP Server](https://github.com/lioarce01/trello-mcp-server)** - An MCP server that interact with user Trello boards, modifying them with prompting.\n- **[Trino](https://github.com/tuannvm/mcp-trino)** - A high-performance Model Context Protocol (MCP) server for Trino implemented in Go.\n- **[Tripadvisor](https://github.com/pab1it0/tripadvisor-mcp)** - An MCP server that enables LLMs to interact with Tripadvisor API, supporting location data, reviews, and photos through standardized MCP interfaces\n- **[Triplyfy MCP](https://github.com/helpful-AIs/triplyfy-mcp)** - An MCP server that lets LLMs plan and manage itineraries with interactive maps in Triplyfy; manage itineraries, places and notes, and search/save flights.\n- **[TrueNAS Core MCP](https://github.com/vespo92/TrueNasCoreMCP)** - An MCP server for interacting with TrueNAS Core.\n- **[TuriX Computer Automation MCP](https://github.com/TurixAI/TuriX-CUA/tree/mac_mcp)** - MCP server for helping automation control your computer complete your pre-setting task.\n- **[Tyk API Management](https://github.com/TykTechnologies/tyk-dashboard-mcp)** - Chat with all of your organization's managed APIs and perform other API lifecycle operations, managing tokens, users, analytics, and more.\n- **[Typesense](https://github.com/suhail-ak-s/mcp-typesense-server)** - A Model Context Protocol (MCP) server implementation that provides AI models with access to Typesense search capabilities. 
This server enables LLMs to discover, search, and analyze data stored in Typesense collections.\n- **[UniFi Dream Machine](https://github.com/sabler/mcp-unifi)** - An MCP server that gets your network telemetry from the UniFi Site Manager and your local UniFi router.\n- **[UniProt](https://github.com/QuentinCody/uniprot-mcp-server)** - Unofficial MCP server for UniProt, providing access to protein sequence data, functional annotations, taxonomic information, and cross-references for proteomics and bioinformatics research.\n- **[uniswap-poolspy-mcp](https://github.com/kukapay/uniswap-poolspy-mcp)** - An MCP server that tracks newly created liquidity pools on Uniswap across nine blockchain networks.\n- **[uniswap-trader-mcp](https://github.com/kukapay/uniswap-trader-mcp)** - An MCP server for AI agents to automate token swaps on Uniswap DEX across multiple blockchains.\n- **[Unity Catalog](https://github.com/ognis1205/mcp-server-unitycatalog)** - An MCP server that enables LLMs to interact with Unity Catalog AI, supporting CRUD operations on Unity Catalog Functions and executing them as MCP tools.\n- **[Unity Integration (Advanced)](https://github.com/quazaai/UnityMCPIntegration)** - Advanced Unity3d Game Engine MCP which supports Execution of Any Editor Related Code Directly Inside of Unity, Fetch Logs, Get Editor State and Allow File Access of the Project making it much more useful in Script Editing or asset creation.\n- **[Unity MCP (AI Game Developer)](https://github.com/IvanMurzak/Unity-MCP)** - `Unity Editor` and `Unity Runtime` MCP integration. Unit Test, Coding, C# Roslyn, Reflection, Assets. Helps to create games with AI. And helps to run AI logic in the game in runtime. \n- **[Unity3d Game Engine](https://github.com/CoderGamester/mcp-unity)** - An MCP server that enables LLMs to interact with Unity3d Game Engine, supporting access to a variety of Unity's Editor engine tools (e.g. 
Console Logs, Test Runner logs, Editor functions, hierarchy state, etc) and executing them as MCP tools or gather them as resources.\n- **[Universal MCP Servers](https://github.com/universal-mcp)** - A collection of MCP servers created using the [AgentR Universal MCP SDK](https://github.com/universal-mcp/universal-mcp).\n- **[Unleash Integration (Feature Toggle)](https://github.com/cuongtl1992/unleash-mcp)** - A Model Context Protocol (MCP) server implementation that integrates with Unleash Feature Toggle system. Provides a bridge between LLM applications and Unleash feature flag system\n- **[Upbit MCP Server](https://github.com/solangii/upbit-mcp-server)** – An MCP server that enables real-time access to cryptocurrency prices, market summaries, and asset listings from the Upbit exchange.\n- **[USA Spending MCP Server](https://github.com/thsmale/usaspending-mcp-server)** – This leverages the official source of government spending data [USASPENDING.gov](https://www.usaspending.gov/). Which enables one to track government spending over time, search government spending by agency, explore government spending to communities, and much more.\n- **[use_aws_mcp](https://github.com/runjivu/use_aws_mcp)** - amazon-q-cli's use_aws tool extracted into independent mcp, for general aws api usage.\n- **[User Feedback](https://github.com/mrexodia/user-feedback-mcp)** - Simple MCP Server to enable a human-in-the-loop workflow in tools like Cline and Cursor.\n- **[Useless Toolkit](https://uselesstoolkit.com/apis/mcp-servers)** - MCP-ready server endpoints for utility APIs, including Password Generator, IP2Geo etc., are provided by UselessToolkit.com, allowing seamless integration with AI agents via secure RapidAPI connections.\n- **[USPTO](https://github.com/riemannzeta/patent_mcp_server)** - MCP server for accessing United States Patent & Trademark Office data through its Open Data Protocol (ODP) API.\n- **[Vectara](https://github.com/vectara/vectara-mcp)** - Query Vectara's 
trusted RAG-as-a-service platform.\n- **[Vega-Lite](https://github.com/isaacwasserman/mcp-vegalite-server)** - Generate visualizations from fetched data using the VegaLite format and renderer.\n- **[Vertica](https://github.com/nolleh/mcp-vertica)** - Vertica database integration in Python with configurable access controls and schema inspection\n- **[Vibe Check](https://github.com/PV-Bhat/vibe-check-mcp-server)** - An MCP server leveraging an external oversight layer to \"vibe check\" agents, and also self-improve accuracy & user alignment over time. Prevents scope creep, code bloat, misalignment, misinterpretation, tunnel vision, and overcomplication.\n- **[Video Editor](https://github.com/burningion/video-editing-mcp)** - A Model Context Protocol Server to add, edit, and search videos with [Video Jungle](https://www.video-jungle.com/).\n- **[Video Still Capture](https://github.com/13rac1/videocapture-mcp)** - 📷 Capture video stills from an OpenCV-compatible webcam or other video source.\n- **[Virtual location (Google Street View,etc.)](https://github.com/mfukushim/map-traveler-mcp)** - Integrates Google Map, Google Street View, PixAI, Stability.ai, ComfyUI API and Bluesky to provide a virtual location simulation in LLM (written in Effect.ts)\n- **[VMware Fusion](https://github.com/yeahdongcn/vmware-fusion-mcp-server)** - Manage VMware Fusion virtual machines via the Fusion REST API.\n- **[Voice Status Report](https://github.com/tomekkorbak/voice-status-report-mcp-server)** - An MCP server that provides voice status updates using OpenAI's text-to-speech API, to be used with Cursor or Claude Code.\n- **[VoiceMode](https://github.com/mbailey/voicemode)** - Enable voice conversations with Claude using any OpenAI-compatible STT/TTS service [getvoicemode.com](https://getvoicemode.com/)\n- **[VolcEngine TOS](https://github.com/dinghuazhou/sample-mcp-server-tos)** - A sample MCP server for VolcEngine TOS that flexibly get objects from TOS.\n- 
**[Voyp](https://github.com/paulotaylor/voyp-mcp)** - VOYP MCP server for making calls using Artificial Intelligence.\n- **[vscode-ai-model-detector](https://github.com/thisis-romar/vscode-ai-model-detector)** - Real-time AI model detection for VS Code Copilot with 100% accuracy. Enables proper git attribution by identifying active models (Claude, GPT, Gemini) via Chat Participant API.\n- **[vulnicheck](https://github.com/andrasfe/vulnicheck)** - Real-time Python package vulnerability scanner that checks dependencies against OSV and NVD databases, providing comprehensive security analysis with CVE details, lock file support, and actionable upgrade recommendations.\n- **[Wanaku MCP Router](https://github.com/wanaku-ai/wanaku/)** - The Wanaku MCP Router is a SSE-based MCP server that provides an extensible routing engine that allows integrating your enterprise systems with AI agents.\n- **[weather-mcp-server](https://github.com/devilcoder01/weather-mcp-server)** - Get real-time weather data for any location using weatherapi.\n- **[Web Search MCP](https://github.com/mrkrsl/web-search-mcp)** - A server that provides full web search, summaries and page extraction for use with Local LLMs.\n- **[Webex](https://github.com/Kashyap-AI-ML-Solutions/webex-messaging-mcp-server)** - A Model Context Protocol (MCP) server that provides AI assistants with comprehensive access to Cisco Webex messaging capabilities.\n- **[Webflow](https://github.com/kapilduraphe/webflow-mcp-server)** - Interact with the Webflow APIs\n- **[webhook-mcp](https://github.com/noobnooc/webhook-mcp)** (by Nooc) - A Model Context Protocol (MCP) server that sends webhook notifications when called.\n- **[Wekan](https://github.com/namar0x0309/wekan-mcp)** - Unofficial MCP server for Wekan, providing all rest api functionality to add, edit, delete tasks and boards.\n- **[whale-tracker-mcp](https://github.com/kukapay/whale-tracker-mcp)** - An MCP server for tracking cryptocurrency whale transactions.\n- 
**[WhatsApp MCP Server](https://github.com/lharries/whatsapp-mcp)** - MCP server for your personal WhatsApp handling individuals, groups, searching and sending.\n- **[Whois MCP](https://github.com/bharathvaj-ganesan/whois-mcp)** - MCP server that performs whois lookup against domain, IP, ASN and TLD.\n- **[Withings](https://github.com/akutishevsky/withings-mcp)** - Access and analyze Withings health data including sleep analysis, body measurements, workouts, ECG recordings, and fitness goals through natural conversation.\n- **[Wikidata MCP](https://github.com/zzaebok/mcp-wikidata)** - Wikidata MCP server that interact with Wikidata, by searching identifiers, extracting metadata, and executing sparql query.\n- **[Wikidata SPARQL](https://github.com/QuentinCody/wikidata-sparql-mcp-server)** - Unofficial REMOTE MCP server for Wikidata's SPARQL endpoint, providing access to structured knowledge data, entity relationships, and semantic queries for research and data analysis.\n- **[Wikifunctions](https://github.com/Fredibau/wikifunctions-mcp-fredibau)** - Allowing AI models to discover and execute functions from the WikiFunctions library.\n- **[Wikipedia MCP](https://github.com/Rudra-ravi/wikipedia-mcp)** - Access and search Wikipedia articles via MCP for AI-powered information retrieval.\n- **[WildFly MCP](https://github.com/wildfly-extras/wildfly-mcp)** - WildFly MCP server that enables LLM to interact with running WildFly servers (retrieve metrics, logs, invoke operations, ...).\n- **[Windows CLI](https://github.com/SimonB97/win-cli-mcp-server)** - MCP server for secure command-line interactions on Windows systems, enabling controlled access to PowerShell, CMD, and Git Bash shells.\n- **[Windsor](https://github.com/windsor-ai/windsor_mcp)** - Windsor MCP (Model Context Protocol) enables your LLM to query, explore, and analyze your full-stack business data integrated into Windsor.ai with zero SQL writing or custom scripting.\n- **[Wordle 
MCP](https://github.com/cr2007/mcp-wordle-python)** - MCP Server that gets the Wordle Solution for a particular date.\n- **[WordPress MCP](https://github.com/Automattic/wordpress-mcp)** - Make your WordPress site into a simple MCP server, exposing functionality to LLMs and AI agents.\n- **[WordPress MCP Adapter](https://github.com/WordPress/mcp-adapter)** - An MCP adapter that bridges the Abilities API to the Model Context Protocol, enabling MCP clients to discover and invoke WordPress plugin, theme, and core abilities programmatically.\n- **[Workflowy](https://github.com/danield137/mcp-workflowy)** - A server that interacts with [workflowy](https://workflowy.com/).\n- **[World Bank data API](https://github.com/anshumax/world_bank_mcp_server)** - A server that fetches data indicators available with the World Bank as part of their data API\n- **[Wren Engine](https://github.com/Canner/wren-engine)** - The Semantic Engine for Model Context Protocol(MCP) Clients and AI Agents\n- **[X (Twitter)](https://github.com/EnesCinr/twitter-mcp)** (by EnesCinr) - Interact with twitter API. Post tweets and search for tweets by query.\n- **[X (Twitter)](https://github.com/vidhupv/x-mcp)** (by vidhupv) - Create, manage and publish X/Twitter posts directly through Claude chat.\n- **[Xcode](https://github.com/r-huijts/xcode-mcp-server)** - MCP server that brings AI to your Xcode projects, enabling intelligent code assistance, file operations, project management, and automated development tasks.\n- **[Xcode-mcp-server](https://github.com/drewster99/xcode-mcp-server)** (by drewster99) - Best Xcode integration - ClaudeCode and Cursor can build your project *with* Xcode and see the same errors you do. 
Fast easy setup.\n- **[xcodebuild](https://github.com/ShenghaiWang/xcodebuild)**  - 🍎 Build iOS Xcode workspace/project and feed back errors to llm.\n- **[Xero-mcp-server](https://github.com/john-zhang-dev/xero-mcp)** - Enabling clients to interact with Xero system for streamlined accounting, invoicing, and business operations.\n- **[Xero-mcp-server](https://github.com/XeroAPI/xero-mcp-server)** - Enabling clients to interact with Xero system for streamlined accounting, invoicing, and business operations.\n- **[XiYan](https://github.com/XGenerationLab/xiyan_mcp_server)** - 🗄️ An MCP server that supports fetching data from a database using natural language queries, powered by XiyanSQL as the text-to-SQL LLM.\n- **[XMind](https://github.com/apeyroux/mcp-xmind)** - Read and search through your XMind directory containing XMind files.\n- **[Yahoo Finance](https://github.com/AgentX-ai/yahoo-finance-server)** - 📈 Lets your AI interact with Yahoo Finance to get comprehensive stock market data, news, financials, and more. 
Proxy supported.\n- **[YetiBrowser MCP](https://github.com/yetidevworks/yetibrowser-mcp)** - A fully open-source implementation of the Browser MCP workflow with standout features such as optimized screenshots, dom diffs, console access, multi-websocket support + more.\n- **[yfinance](https://github.com/Adity-star/mcp-yfinance-server)** -💹The MCP YFinance Stock Server provides real-time and historical stock data in a standard format, powering dashboards, AI agents,and research tools with seamless financial insights.\n- **[YNAB](https://github.com/ChuckBryan/ynabmcpserver)** - A Model Context Protocol (MCP) server for integrating with YNAB (You Need A Budget), allowing AI assistants to securely access and analyze your financial data.\n- **[YouTrack](https://github.com/tonyzorin/youtrack-mcp)** - A Model Context Protocol (MCP) server implementation for JetBrains YouTrack, allowing AI assistants to interact with YouTrack issue tracking system.\n- **[YouTube](https://github.com/Klavis-AI/klavis/tree/main/mcp_servers/youtube)** - Extract Youtube video information (with proxies support).\n- **[YouTube](https://github.com/ZubeidHendricks/youtube-mcp-server)** - Comprehensive YouTube API integration for video management, Shorts creation, and analytics.\n- **[YouTube DLP](https://github.com/AgentX-ai/youtube-dlp-server)** - Retrieve video information, subtitles, and top comments with proxies.\n- **[YouTube MCP](https://github.com/aardeshir/youtube-mcp)** - Create playlists from song lists with OAuth2. 
Search videos, manage playlists, let AI curate your YouTube collections.\n- **[Youtube Uploader MCP](https://github.com/anwerj/youtube-uploader-mcp)** - AI‑powered YouTube uploader—no CLI, no YouTube Studio.\n- **[YouTube Video Summarizer](https://github.com/nabid-pf/youtube-video-summarizer-mcp)** - Summarize lengthy youtube videos.\n- **[yutu](https://github.com/eat-pray-ai/yutu)** - A fully functional MCP server and CLI for YouTube to automate YouTube operation.\n- **[ZapCap](https://github.com/bogdan01m/zapcap-mcp-server)** - MCP server for ZapCap API providing video caption and B-roll generation via natural language\n- **[Zettelkasten](https://github.com/joshylchen/zettelkasten)**- Comprehensive AI-powered knowledge management system implementing the Zettelkasten method. Features atomic note creation, full-text search, AI-powered CEQRC workflows (Capture→Explain→Question→Refine→Connect), intelligent link discovery, and multi-interface access (CLI, API, Web UI, MCP). Perfect for researchers, students, and knowledge workers.\n- **[ZincBind](https://github.com/QuentinCody/zincbind-mcp-server)** - Unofficial MCP server for ZincBind, providing access to a comprehensive database of zinc binding sites in proteins, structural coordination data, and metalloproteomics research information.\n- **[Zoom](https://github.com/Prathamesh0901/zoom-mcp-server/tree/main)** - Create, update, read and delete your zoom meetings.\n## 📚 Frameworks\n\nThese are high-level frameworks that make it easier to build MCP servers or clients.\n\n### For servers\n\n* **[Anubis MCP](https://github.com/zoedsoupe/anubis-mcp)** (Elixir) - A high-performance and high-level Model Context Protocol (MCP) implementation in Elixir. 
Think like \"Live View\" for MCP.\n* **[ModelFetch](https://github.com/phuctm97/modelfetch/)** (TypeScript) - Runtime-agnostic SDK to create and deploy MCP servers anywhere TypeScript/JavaScript runs\n* **[EasyMCP](https://github.com/zcaceres/easy-mcp/)** (TypeScript)\n* **[FastAPI to MCP auto generator](https://github.com/tadata-org/fastapi_mcp)** – A zero-configuration tool for automatically exposing FastAPI endpoints as MCP tools by **[Tadata](https://tadata.com/)**\n* **[FastMCP](https://github.com/punkpeye/fastmcp)** (TypeScript)\n* **[Foobara MCP Connector](https://github.com/foobara/mcp-connector)** - Easily expose Foobara commands written in Ruby as tools via MCP\n* **[Foxy Contexts](https://github.com/strowk/foxy-contexts)** – A library to build MCP servers in Golang by **[strowk](https://github.com/strowk)**\n* **[Higress MCP Server Hosting](https://github.com/alibaba/higress/tree/main/plugins/wasm-go/mcp-servers)** - A solution for hosting MCP Servers by extending the API Gateway (based on Envoy) with wasm plugins.\n* **[MCP Declarative Java SDK](https://github.com/codeboyzhou/mcp-declarative-java-sdk)** Annotation-driven MCP servers development with Java, no Spring Framework Required, minimize dependencies as much as possible.\n* **[MCP-Framework](https://mcp-framework.com)** Build MCP servers with elegance and speed in TypeScript. Comes with a CLI to create your project with `mcp create app`. 
Get started with your first server in under 5 minutes by **[Alex Andru](https://github.com/QuantGeekDev)**\n* **[MCP Plexus](https://github.com/Super-I-Tech/mcp_plexus)**: A secure, **multi-tenant** and Multi-user MCP python server framework built to integrate easily with external services via OAuth 2.1, offering scalable and robust solutions for managing complex AI applications.\n* **[mcp_sse (Elixir)](https://github.com/kEND/mcp_sse)** An SSE implementation in Elixir for rapidly creating MCP servers.\n* **[mxcp](https://github.com/raw-labs/mxcp)** (Python) - Open-source framework for building enterprise-grade MCP servers using just YAML, SQL, and Python, with built-in auth, monitoring, ETL and policy enforcement.\n* **[Next.js MCP Server Template](https://github.com/vercel-labs/mcp-for-next.js)** (Typescript) - A starter Next.js project that uses the MCP Adapter to allow MCP clients to connect and access resources.\n* **[PayMCP](https://github.com/blustAI/paymcp)** (Python & TypeScript) - Lightweight payments layer for MCP servers: turn tools into paid endpoints with a two-line decorator. [PyPI](https://pypi.org/project/paymcp/) · [npm](https://www.npmjs.com/package/paymcp) · [TS repo](https://github.com/blustAI/paymcp-ts)\n* **[Perl SDK](https://github.com/mojolicious/mojo-mcp)** - An SDK for building MCP servers and clients with the Perl programming language.\n* **[Quarkus MCP Server SDK](https://github.com/quarkiverse/quarkus-mcp-server)** (Java)\n- **[R mcptools](https://github.com/posit-dev/mcptools)** - An R SDK for creating R-based MCP servers and retrieving functionality from third-party MCP servers as R functions.\n* **[SAP ABAP MCP Server SDK](https://github.com/abap-ai/mcp)** - Build SAP ABAP based MCP servers. 
ABAP 7.52 based with 7.02 downport; runs on R/3 & S/4HANA on-premises, currently not cloud-ready.\n* **[Spring AI MCP Server](https://docs.spring.io/spring-ai/reference/api/mcp/mcp-server-boot-starter-docs.html)** - Provides auto-configuration for setting up an MCP server in Spring Boot applications.\n* **[Template MCP Server](https://github.com/mcpdotdirect/template-mcp-server)** - A CLI tool to create a new Model Context Protocol server project with TypeScript support, dual transport options, and an extensible structure\n* **[AgentR Universal MCP SDK](https://github.com/universal-mcp/universal-mcp)** - A python SDK to build MCP Servers with inbuilt credential management by **[Agentr](https://agentr.dev/home)**\n* **[Vercel MCP Adapter](https://github.com/vercel/mcp-adapter)** (TypeScript) - A simple package to start serving an MCP server on most major JS meta-frameworks including Next, Nuxt, Svelte, and more.\n* **[PHP MCP Server](https://github.com/php-mcp/server)** (PHP) - Core PHP implementation for the Model Context Protocol (MCP) server\n\n### For clients\n\n* **[codemirror-mcp](https://github.com/marimo-team/codemirror-mcp)** - CodeMirror extension that implements the Model Context Protocol (MCP) for resource mentions and prompt commands\n* **[llm-analysis-assistant](https://github.com/xuzexin-hz/llm-analysis-assistant)** <img height=\"12\" width=\"12\" src=\"https://raw.githubusercontent.com/xuzexin-hz/llm-analysis-assistant/refs/heads/main/src/llm_analysis_assistant/pages/html/imgs/favicon.ico\" alt=\"Langfuse Logo\" /> - A very streamlined mcp client that supports calling and monitoring stdio/sse/streamableHttp, and can also view request responses through the /logs page. 
It also supports monitoring and simulation of ollama/openai interface.\n* **[MCP-Agent](https://github.com/lastmile-ai/mcp-agent)** - A simple, composable framework to build agents using Model Context Protocol by **[LastMile AI](https://www.lastmileai.dev)**\n* **[Spring AI MCP Client](https://docs.spring.io/spring-ai/reference/api/mcp/mcp-client-boot-starter-docs.html)** - Provides auto-configuration for MCP client functionality in Spring Boot applications.\n* **[MCP CLI Client](https://github.com/vincent-pli/mcp-cli-host)** - A CLI host application that enables Large Language Models (LLMs) to interact with external tools through the Model Context Protocol (MCP).\n* **[OpenMCP Client](https://github.com/LSTM-Kirigaya/openmcp-client/)** - An all-in-one vscode/trae/cursor plugin for MCP server debugging. [Document](https://kirigaya.cn/openmcp/) & [OpenMCP SDK](https://kirigaya.cn/openmcp/sdk-tutorial/).\n* **[PHP MCP Client](https://github.com/php-mcp/client)** - Core PHP implementation for the Model Context Protocol (MCP) Client\n* **[Runbear](https://runbear.io/solutions/integrations/slack/mcp)** - No-code MCP client for team chat platforms, such as Slack, Microsoft Teams, and Discord.\n\n## 📚 Resources\n\nAdditional resources on MCP.\n\n- **[A2A-MCP Java Bridge](https://github.com/vishalmysore/a2ajava)** - A2AJava brings powerful A2A-MCP integration directly into your Java applications. 
It enables developers to annotate standard Java methods and instantly expose them as MCP Server, A2A-discoverable actions — with no boilerplate or service registration overhead.\n- **[AiMCP](https://www.aimcp.info)** - A collection of MCP clients&servers to find the right mcp tools by **[Hekmon](https://github.com/hekmon8)**\n- **[Awesome Crypto MCP Servers by badkk](https://github.com/badkk/awesome-crypto-mcp-servers)** - A curated list of MCP servers by **[Luke Fan](https://github.com/badkk)**\n- **[Awesome MCP Servers by appcypher](https://github.com/appcypher/awesome-mcp-servers)** - A curated list of MCP servers by **[Stephen Akinyemi](https://github.com/appcypher)**\n- **[Awesome MCP Servers by punkpeye](https://github.com/punkpeye/awesome-mcp-servers)** (**[website](https://glama.ai/mcp/servers)**) - A curated list of MCP servers by **[Frank Fiegel](https://github.com/punkpeye)**\n- **[Awesome MCP Servers by wong2](https://github.com/wong2/awesome-mcp-servers)** (**[website](https://mcpservers.org)**) - A curated list of MCP servers by **[wong2](https://github.com/wong2)**\n- **[Awesome Remote MCP Servers by JAW9C](https://github.com/jaw9c/awesome-remote-mcp-servers)** - A curated list of **remote** MCP servers, including their authentication support by **[JAW9C](https://github.com/jaw9c)**\n- **[Discord Server](https://glama.ai/mcp/discord)** – A community discord server dedicated to MCP by **[Frank Fiegel](https://github.com/punkpeye)**\n- **[Install This MCP](https://installthismcp.com)** - Reduce Installation Friction with beautiful installation guides\n- <img height=\"12\" width=\"12\" src=\"https://raw.githubusercontent.com/klavis-ai/klavis/main/static/klavis-ai.png\" alt=\"Klavis Logo\" /> **[Klavis AI](https://www.klavis.ai)** - Open Source MCP Infra. 
Hosted MCP servers and MCP clients on Slack and Discord.\n- **[MCP Badges](https://github.com/mcpx-dev/mcp-badges)** – Quickly highlight your MCP project with clear, eye-catching badges, by **[Ironben](https://github.com/nanbingxyz)**\n- <img height=\"12\" width=\"12\" src=\"https://mcpproxy.app/favicon.svg\" alt=\"MCPProxy Logo\" /> **[MCPProxy](https://github.com/smart-mcp-proxy/mcpproxy-go)** - Open-source local app that enables access to multiple MCP servers and thousands of tools with intelligent discovery via MCP protocol, runs servers in isolated environments, and features automatic quarantine protection against malicious tools.\n- **[MCPRepository.com](https://mcprepository.com/)** - A repository that indexes and organizes all MCP servers for easy discovery.\n- **[mcp-cli](https://github.com/wong2/mcp-cli)** - A CLI inspector for the Model Context Protocol by **[wong2](https://github.com/wong2)**\n- **[mcp-dockmaster](https://mcp-dockmaster.com)** - An Open-Sourced UI to install and manage MCP servers for Windows, Linux and macOS.\n- **[mcp-get](https://mcp-get.com)** - Command line tool for installing and managing MCP servers by **[Michael Latman](https://github.com/michaellatman)**\n- **[mcp-guardian](https://github.com/eqtylab/mcp-guardian)** - GUI application + tools for proxying / managing control of MCP servers by **[EQTY Lab](https://eqtylab.io)**\n- **[MCP Linker](https://github.com/milisp/mcp-linker)** - A cross-platform Tauri GUI tool for one-click setup and management of MCP servers, supporting Claude Desktop, Cursor, Windsurf, VS Code, Cline, and Neovim.\n- **[mcp-manager](https://github.com/zueai/mcp-manager)** - Simple Web UI to install and manage MCP servers for Claude Desktop by **[Zue](https://github.com/zueai)**\n- **[MCP Marketplace Web Plugin](https://github.com/AI-Agent-Hub/mcp-marketplace)** MCP Marketplace is a small Web UX plugin to integrate with AI applications, Support various MCP Server API Endpoint (e.g pulsemcp.com/deepnlp.org 
and more). Allowing user to browse, paginate and select various MCP servers by different categories. [Pypi](https://pypi.org/project/mcp-marketplace) | [Maintainer](https://github.com/AI-Agent-Hub) | [Website](http://www.deepnlp.org/store/ai-agent/mcp-server)\n- **[mcp.natoma.ai](https://mcp.natoma.ai)** – A Hosted MCP Platform to discover, install, manage and deploy MCP servers by **[Natoma Labs](https://www.natoma.ai)**\n- **[mcp.run](https://mcp.run)** - A hosted registry and control plane to install & run secure + portable MCP Servers.\n- **[MCPHub](https://www.mcphub.com)** - Website to list high quality MCP servers and reviews by real users. Also provide online chatbot for popular LLM models with MCP server support.\n- **[MCP Router](https://mcp-router.net)** – Free Windows and macOS app that simplifies MCP management while providing seamless app authentication and powerful log visualization by **[MCP Router](https://github.com/mcp-router/mcp-router)**\n- **[MCP Servers Hub](https://github.com/apappascs/mcp-servers-hub)** (**[website](https://mcp-servers-hub-website.pages.dev/)**) - A curated list of MCP servers by **[apappascs](https://github.com/apappascs)**\n- **[MCPServers.com](https://mcpservers.com)** - A growing directory of high-quality MCP servers with clear setup guides for a variety of MCP clients. 
Built by the team behind the **[Highlight MCP client](https://highlightai.com/)**\n- **[MCP Servers Rating and User Reviews](http://www.deepnlp.org/store/ai-agent/mcp-server)** - Website to rate MCP servers, write authentic user reviews, and [search engine for agent & mcp](http://www.deepnlp.org/search/agent)\n- **[MCP Sky](https://bsky.app/profile/brianell.in/feed/mcp)** - Bluesky feed for MCP related news and discussion by **[@brianell.in](https://bsky.app/profile/brianell.in)**\n- **[MCP X Community](https://x.com/i/communities/1861891349609603310)** – A X community for MCP by **[Xiaoyi](https://x.com/chxy)**\n- **[MCPHub](https://github.com/Jeamee/MCPHub-Desktop)** – An Open Source macOS & Windows GUI Desktop app for discovering, installing and managing MCP servers by **[Jeamee](https://github.com/jeamee)**\n- **[mcpm](https://github.com/pathintegral-institute/mcpm.sh)** ([website](https://mcpm.sh)) - MCP Manager (MCPM) is a Homebrew-like service for managing Model Context Protocol (MCP) servers across clients by **[Pathintegral](https://github.com/pathintegral-institute)**\n- **[MCPVerse](https://mcpverse.dev)** - A portal for creating & hosting authenticated MCP servers and connecting to them securely.\n- **[MCP Servers Search](https://github.com/atonomus/mcp-servers-search)** - An MCP server that provides tools for querying and discovering available MCP servers from this list.\n- **[Search MCP Server](https://github.com/krzysztofkucmierz/search-mcp-server)** - Recommends the most relevant MCP servers based on the client's query by searching this README file.\n- **[MCPWatch](https://github.com/kapilduraphe/mcp-watch)** - A comprehensive security scanner for Model Context Protocol (MCP) servers that detects vulnerabilities and security issues in your MCP server implementations.\n- <img height=\"12\" width=\"12\" src=\"https://mkinf.io/favicon-lilac.png\" alt=\"mkinf Logo\" /> **[mkinf](https://mkinf.io)** - An Open Source registry of hosted MCP Servers to 
accelerate AI agent workflows.\n- **[Open-Sourced MCP Servers Directory](https://github.com/chatmcp/mcp-directory)** - A curated list of MCP servers by **[mcpso](https://mcp.so)**\n- <img height=\"12\" width=\"12\" src=\"https://opentools.com/favicon.ico\" alt=\"OpenTools Logo\" /> **[OpenTools](https://opentools.com)** - An open registry for finding, installing, and building with MCP servers by **[opentoolsteam](https://github.com/opentoolsteam)**\n- **[Programmatic MCP Prototype](https://github.com/domdomegg/programmatic-mcp-prototype)** - Experimental agent prototype demonstrating programmatic MCP tool composition, progressive tool discovery, state persistence, and skill building through TypeScript code execution by **[Adam Jones](https://github.com/domdomegg)**\n- **[PulseMCP](https://www.pulsemcp.com)** ([API](https://www.pulsemcp.com/api)) - Community hub & weekly newsletter for discovering MCP servers, clients, articles, and news by **[Tadas Antanavicius](https://github.com/tadasant)**, **[Mike Coughlin](https://github.com/macoughl)**, and **[Ravina Patel](https://github.com/ravinahp)**\n- **[r/mcp](https://www.reddit.com/r/mcp)** – A Reddit community dedicated to MCP by **[Frank Fiegel](https://github.com/punkpeye)**\n- **[MCP.ing](https://mcp.ing/)** - A list of MCP services for discovering MCP servers in the community and providing a convenient search function for MCP services by **[iiiusky](https://github.com/iiiusky)**\n- **[MCP Hunt](https://mcp-hunt.com)** - Realtime platform for discovering trending MCP servers with momentum tracking, upvoting, and community discussions - like Product Hunt meets Reddit for MCP\n- **[Smithery](https://smithery.ai/)** - A registry of MCP servers to find the right tools for your LLM agents by **[Henry Mao](https://github.com/calclavia)**\n- **[Toolbase](https://gettoolbase.ai)** - Desktop application that manages tools and MCP servers with just a few clicks - no coding required by 
**[gching](https://github.com/gching)**\n- **[ToolHive](https://github.com/StacklokLabs/toolhive)** - A lightweight utility designed to simplify the deployment and management of MCP servers, ensuring ease of use, consistency, and security through containerization by **[StacklokLabs](https://github.com/StacklokLabs)**\n- **[NetMind](https://www.netmind.ai/AIServices)** - Access powerful AI services via simple APIs or MCP servers to supercharge your productivity.\n- **[Webrix MCP Gateway](https://github.com/webrix-ai/secure-mcp-gateway)** - Enterprise MCP gateway with SSO, RBAC, audit trails, and token vaults for secure, centralized AI agent access control. Deploy via Helm charts on-premise or in your cloud. [webrix.ai](https://webrix.ai)\n\n\n\n## 🚀 Getting Started\n\n### Using MCP Servers in this Repository\nTypeScript-based servers in this repository can be used directly with `npx`.\n\nFor example, this will start the [Memory](src/memory) server:\n```sh\nnpx -y @modelcontextprotocol/server-memory\n```\n\nPython-based servers in this repository can be used directly with [`uvx`](https://docs.astral.sh/uv/concepts/tools/) or [`pip`](https://pypi.org/project/pip/). `uvx` is recommended for ease of use and setup.\n\nFor example, this will start the [Git](src/git) server:\n```sh\n# With uvx\nuvx mcp-server-git\n\n# With pip\npip install mcp-server-git\npython -m mcp_server_git\n```\n\nFollow [these](https://docs.astral.sh/uv/getting-started/installation/) instructions to install `uv` / `uvx` and [these](https://pip.pypa.io/en/stable/installation/) to install `pip`.\n\n### Using an MCP Client\nHowever, running a server on its own isn't very useful, and should instead be configured into an MCP client. 
For example, here's the Claude Desktop configuration to use the above server:\n\n```json\n{\n  \"mcpServers\": {\n    \"memory\": {\n      \"command\": \"npx\",\n      \"args\": [\"-y\", \"@modelcontextprotocol/server-memory\"]\n    }\n  }\n}\n```\n\nAdditional examples of using the Claude Desktop as an MCP client might look like:\n\n```json\n{\n  \"mcpServers\": {\n    \"filesystem\": {\n      \"command\": \"npx\",\n      \"args\": [\"-y\", \"@modelcontextprotocol/server-filesystem\", \"/path/to/allowed/files\"]\n    },\n    \"git\": {\n      \"command\": \"uvx\",\n      \"args\": [\"mcp-server-git\", \"--repository\", \"path/to/git/repo\"]\n    },\n    \"github\": {\n      \"command\": \"npx\",\n      \"args\": [\"-y\", \"@modelcontextprotocol/server-github\"],\n      \"env\": {\n        \"GITHUB_PERSONAL_ACCESS_TOKEN\": \"<YOUR_TOKEN>\"\n      }\n    },\n    \"postgres\": {\n      \"command\": \"npx\",\n      \"args\": [\"-y\", \"@modelcontextprotocol/server-postgres\", \"postgresql://localhost/mydb\"]\n    }\n  }\n}\n```\n\n## 🛠️ Creating Your Own Server\n\nInterested in creating your own MCP server? 
Visit the official documentation at [modelcontextprotocol.io](https://modelcontextprotocol.io/introduction) for comprehensive guides, best practices, and technical details on implementing MCP servers.\n\n## 🤝 Contributing\n\nSee [CONTRIBUTING.md](CONTRIBUTING.md) for information about contributing to this repository.\n\n## 🔒 Security\n\nSee [SECURITY.md](SECURITY.md) for reporting security vulnerabilities.\n\n## 📜 License\n\nThis project is licensed under the Apache License, Version 2.0 for new contributions, with existing code under MIT - see the [LICENSE](LICENSE) file for details.\n\n## 💬 Community\n\n- [GitHub Discussions](https://github.com/orgs/modelcontextprotocol/discussions)\n\n## ⭐ Support\n\nIf you find MCP servers useful, please consider starring the repository and contributing new servers or improvements!\n\n---\n\nManaged by Anthropic, but built together with the community. The Model Context Protocol is open source and we encourage everyone to contribute their own servers and improvements!\n"
  },
  {
    "path": "SECURITY.md",
    "content": "# Security Policy\n\nThank you for helping keep the Model Context Protocol and its ecosystem secure.\n\n## Important Notice\n\nThe servers in this repository are **reference implementations** intended to demonstrate\nMCP features and SDK usage. They serve as educational examples for developers building\ntheir own MCP servers, not as production-ready solutions.\n\nThis repository is **not** eligible for security vulnerability reporting. If you discover\na vulnerability in an MCP SDK, please report it in the appropriate SDK repository.\n\n## Reporting Security Issues in MCP SDKs\n\nIf you discover a security vulnerability in an MCP SDK, please report it through the\n[GitHub Security Advisory process](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability)\nin the relevant SDK repository.\n\nPlease **do not** report security vulnerabilities through public GitHub issues, discussions,\nor pull requests.\n"
  },
  {
    "path": "package.json",
    "content": "{\n  \"name\": \"@modelcontextprotocol/servers\",\n  \"private\": true,\n  \"version\": \"0.6.2\",\n  \"description\": \"Model Context Protocol servers\",\n  \"license\": \"SEE LICENSE IN LICENSE\",\n  \"author\": \"Model Context Protocol a Series of LF Projects, LLC.\",\n  \"homepage\": \"https://modelcontextprotocol.io\",\n  \"bugs\": \"https://github.com/modelcontextprotocol/servers/issues\",\n  \"type\": \"module\",\n  \"workspaces\": [\n    \"src/*\"\n  ],\n  \"files\": [],\n  \"scripts\": {\n    \"build\": \"npm run build --workspaces\",\n    \"watch\": \"npm run watch --workspaces\",\n    \"publish-all\": \"npm publish --workspaces --access public\",\n    \"link-all\": \"npm link --workspaces\"\n  },\n  \"dependencies\": {\n    \"@modelcontextprotocol/server-everything\": \"*\",\n    \"@modelcontextprotocol/server-memory\": \"*\",\n    \"@modelcontextprotocol/server-filesystem\": \"*\",\n    \"@modelcontextprotocol/server-sequential-thinking\": \"*\"\n  }\n}\n"
  },
  {
    "path": "scripts/release.py",
    "content": "#!/usr/bin/env uv run --script\n# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n#     \"click>=8.1.8\",\n#     \"tomlkit>=0.13.2\"\n# ]\n# ///\nimport sys\nimport re\nimport click\nfrom pathlib import Path\nimport json\nimport tomlkit\nimport datetime\nimport subprocess\nfrom dataclasses import dataclass\nfrom typing import Any, Iterator, NewType, Protocol\n\n\nVersion = NewType(\"Version\", str)\nGitHash = NewType(\"GitHash\", str)\n\n\nclass GitHashParamType(click.ParamType):\n    name = \"git_hash\"\n\n    def convert(\n        self, value: Any, param: click.Parameter | None, ctx: click.Context | None\n    ) -> GitHash | None:\n        if value is None:\n            return None\n\n        if not (8 <= len(value) <= 40):\n            self.fail(f\"Git hash must be between 8 and 40 characters, got {len(value)}\")\n\n        if not re.match(r\"^[0-9a-fA-F]+$\", value):\n            self.fail(\"Git hash must contain only hex digits (0-9, a-f)\")\n\n        try:\n            # Verify hash exists in repo\n            subprocess.run(\n                [\"git\", \"rev-parse\", \"--verify\", value], check=True, capture_output=True\n            )\n        except subprocess.CalledProcessError:\n            self.fail(f\"Git hash {value} not found in repository\")\n\n        return GitHash(value.lower())\n\n\nGIT_HASH = GitHashParamType()\n\n\nclass Package(Protocol):\n    path: Path\n\n    def package_name(self) -> str: ...\n\n    def update_version(self, version: Version) -> None: ...\n\n\n@dataclass\nclass NpmPackage:\n    path: Path\n\n    def package_name(self) -> str:\n        with open(self.path / \"package.json\", \"r\") as f:\n            return json.load(f)[\"name\"]\n\n    def update_version(self, version: Version):\n        with open(self.path / \"package.json\", \"r+\") as f:\n            data = json.load(f)\n            data[\"version\"] = version\n            f.seek(0)\n            json.dump(data, f, indent=2)\n            
f.truncate()\n\n\n@dataclass\nclass PyPiPackage:\n    path: Path\n\n    def package_name(self) -> str:\n        with open(self.path / \"pyproject.toml\") as f:\n            toml_data = tomlkit.parse(f.read())\n            name = toml_data.get(\"project\", {}).get(\"name\")\n            if not name:\n                raise Exception(\"No name in pyproject.toml project section\")\n            return str(name)\n\n    def update_version(self, version: Version):\n        # Update version in pyproject.toml\n        with open(self.path / \"pyproject.toml\") as f:\n            data = tomlkit.parse(f.read())\n            data[\"project\"][\"version\"] = version\n\n        with open(self.path / \"pyproject.toml\", \"w\") as f:\n            f.write(tomlkit.dumps(data))\n\n        # Regenerate uv.lock to match the updated pyproject.toml\n        subprocess.run([\"uv\", \"lock\"], cwd=self.path, check=True)\n\n\ndef has_changes(path: Path, git_hash: GitHash) -> bool:\n    \"\"\"Check if any files changed between current state and git hash\"\"\"\n    try:\n        output = subprocess.run(\n            [\"git\", \"diff\", \"--name-only\", git_hash, \"--\", \".\"],\n            cwd=path,\n            check=True,\n            capture_output=True,\n            text=True,\n        )\n\n        changed_files = [Path(f) for f in output.stdout.splitlines()]\n        relevant_files = [f for f in changed_files if f.suffix in [\".py\", \".ts\"]]\n        return len(relevant_files) >= 1\n    except subprocess.CalledProcessError:\n        return False\n\n\ndef gen_version() -> Version:\n    \"\"\"Generate version based on current date\"\"\"\n    now = datetime.datetime.now()\n    return Version(f\"{now.year}.{now.month}.{now.day}\")\n\n\ndef find_changed_packages(directory: Path, git_hash: GitHash) -> Iterator[Package]:\n    for path in directory.glob(\"*/package.json\"):\n        if has_changes(path.parent, git_hash):\n            yield NpmPackage(path.parent)\n    for path in 
directory.glob(\"*/pyproject.toml\"):\n        if has_changes(path.parent, git_hash):\n            yield PyPiPackage(path.parent)\n\n\n@click.group()\ndef cli():\n    pass\n\n\n@cli.command(\"update-packages\")\n@click.option(\n    \"--directory\", type=click.Path(exists=True, path_type=Path), default=Path.cwd()\n)\n@click.argument(\"git_hash\", type=GIT_HASH)\ndef update_packages(directory: Path, git_hash: GitHash) -> int:\n    # Detect package type\n    path = directory.resolve(strict=True)\n    version = gen_version()\n\n    for package in find_changed_packages(path, git_hash):\n        name = package.package_name()\n        package.update_version(version)\n\n        click.echo(f\"{name}@{version}\")\n\n    return 0\n\n\n@cli.command(\"generate-notes\")\n@click.option(\n    \"--directory\", type=click.Path(exists=True, path_type=Path), default=Path.cwd()\n)\n@click.argument(\"git_hash\", type=GIT_HASH)\ndef generate_notes(directory: Path, git_hash: GitHash) -> int:\n    # Detect package type\n    path = directory.resolve(strict=True)\n    version = gen_version()\n\n    click.echo(f\"# Release : v{version}\")\n    click.echo(\"\")\n    click.echo(\"## Updated packages\")\n    for package in find_changed_packages(path, git_hash):\n        name = package.package_name()\n        click.echo(f\"- {name}@{version}\")\n\n    return 0\n\n\n@cli.command(\"generate-version\")\ndef generate_version() -> int:\n    # Detect package type\n    click.echo(gen_version())\n    return 0\n\n\n@cli.command(\"generate-matrix\")\n@click.option(\n    \"--directory\", type=click.Path(exists=True, path_type=Path), default=Path.cwd()\n)\n@click.option(\"--npm\", is_flag=True, default=False)\n@click.option(\"--pypi\", is_flag=True, default=False)\n@click.argument(\"git_hash\", type=GIT_HASH)\ndef generate_matrix(directory: Path, git_hash: GitHash, pypi: bool, npm: bool) -> int:\n    # Detect package type\n    path = directory.resolve(strict=True)\n    version = gen_version()\n\n    changes 
= []\n    for package in find_changed_packages(path, git_hash):\n        pkg = package.path.relative_to(path)\n        if npm and isinstance(package, NpmPackage):\n            changes.append(str(pkg))\n        if pypi and isinstance(package, PyPiPackage):\n            changes.append(str(pkg))\n\n    click.echo(json.dumps(changes))\n    return 0\n\n\nif __name__ == \"__main__\":\n    sys.exit(cli())\n"
  },
  {
    "path": "src/everything/.prettierignore",
    "content": "packages\ndist\nREADME.md\nnode_modules\n"
  },
  {
    "path": "src/everything/AGENTS.md",
    "content": "# MCP \"Everything\" Server - Development Guidelines\n\n## Build, Test & Run Commands\n\n- Build: `npm run build` - Compiles TypeScript to JavaScript\n- Watch mode: `npm run watch` - Watches for changes and rebuilds automatically\n- Run STDIO server: `npm run start:stdio` - Starts the MCP server using stdio transport\n- Run SSE server: `npm run start:sse` - Starts the MCP server with SSE transport\n- Run StreamableHttp server: `npm run start:stremableHttp` - Starts the MCP server with StreamableHttp transport\n- Prepare release: `npm run prepare` - Builds the project for publishing\n\n## Code Style Guidelines\n\n- Use ES modules with `.js` extension in import paths\n- Strictly type all functions and variables with TypeScript\n- Follow zod schema patterns for tool input validation\n- Prefer async/await over callbacks and Promise chains\n- Place all imports at top of file, grouped by external then internal\n- Use descriptive variable names that clearly indicate purpose\n- Implement proper cleanup for timers and resources in server shutdown\n- Handle errors with try/catch blocks and provide clear error messages\n- Use consistent indentation (2 spaces) and trailing commas in multi-line objects\n- Match existing code style, import order, and module layout in the respective folder.\n- Use camelCase for variables/functions,\n- Use PascalCase for types/classes,\n- Use UPPER_CASE for constants\n- Use kebab-case for file names and registered tools, prompts, and resources.\n- Use verbs for tool names, e.g., `get-annotated-message` instead of `annotated-message`\n\n## Extending the Server\n\nThe Everything Server is designed to be extended at well-defined points.\nSee [Extension Points](docs/extension.md) and [Project Structure](docs/structure.md).\nThe server factory is `src/everything/server/index.ts` and registers all features during startup as well as handling post-connection setup.\n\n### High-level\n\n- Tools live under `src/everything/tools/` and are 
registered via `registerTools(server)`.\n- Resources live under `src/everything/resources/` and are registered via `registerResources(server)`.\n- Prompts live under `src/everything/prompts/` and are registered via `registerPrompts(server)`.\n- Subscriptions and simulated update routines are under `src/everything/resources/subscriptions.ts`.\n- Logging helpers are under `src/everything/server/logging.ts`.\n- Transport managers are under `src/everything/transports/`.\n\n### When adding a new feature\n\n- Follow the existing file/module pattern in its folder (naming, exports, and registration function).\n- Export a `registerX(server)` function that registers new items with the MCP SDK in the same style as existing ones.\n- Wire your new module into the central index (e.g., update `tools/index.ts`, `resources/index.ts`, or `prompts/index.ts`).\n- Ensure schemas (for tools) are accurate JSON Schema and include helpful descriptions and examples.\n  `server/index.ts` and usages in `logging.ts` and `subscriptions.ts`.\n- Keep the docs in `src/everything/docs/` up to date if you add or modify noteworthy features.\n"
  },
  {
    "path": "src/everything/Dockerfile",
    "content": "FROM node:22.12-alpine AS builder\n\nCOPY src/everything /app\nCOPY tsconfig.json /tsconfig.json\n\nWORKDIR /app\n\nRUN --mount=type=cache,target=/root/.npm npm install\n\nFROM node:22-alpine AS release\n\nWORKDIR /app\n\nCOPY --from=builder /app/dist /app/dist\nCOPY --from=builder /app/package.json /app/package.json\nCOPY --from=builder /app/package-lock.json /app/package-lock.json\n\nENV NODE_ENV=production\n\nRUN npm ci --ignore-scripts --omit-dev\n\nCMD [\"node\", \"dist/index.js\"]"
  },
  {
    "path": "src/everything/README.md",
    "content": "# Everything MCP Server\n**[Architecture](docs/architecture.md)\n| [Project Structure](docs/structure.md)\n| [Startup Process](docs/startup.md)\n| [Server Features](docs/features.md)\n| [Extension Points](docs/extension.md)\n| [How It Works](docs/how-it-works.md)**\n\n\nThis MCP server attempts to exercise all the features of the MCP protocol. It is not intended to be a useful server, but rather a test server for builders of MCP clients. It implements prompts, tools, resources, sampling, and more to showcase MCP capabilities.\n\n## Tools, Resources, Prompts, and Other Features\n\nA complete list of the registered MCP primitives and other protocol features demonstrated can be found in the [Server Features](docs/features.md) document.\n\n## Usage with Claude Desktop (uses [stdio Transport](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports#stdio))\n\nAdd to your `claude_desktop_config.json`:\n\n```json\n{\n  \"mcpServers\": {\n    \"everything\": {\n      \"command\": \"npx\",\n      \"args\": [\n        \"-y\",\n        \"@modelcontextprotocol/server-everything\"\n      ]\n    }\n  }\n}\n```\n\n## Usage with VS Code\n\nFor quick installation, use of of the one-click install buttons below...\n\n[![Install with NPX in VS Code](https://img.shields.io/badge/VS_Code-NPM-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=everything&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40modelcontextprotocol%2Fserver-everything%22%5D%7D) [![Install with NPX in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-NPM-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=everything&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40modelcontextprotocol%2Fserver-everything%22%5D%7D&quality=insiders)\n\n[![Install with Docker in VS 
Code](https://img.shields.io/badge/VS_Code-Docker-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=everything&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22mcp%2Feverything%22%5D%7D) [![Install with Docker in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Docker-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=everything&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22mcp%2Feverything%22%5D%7D&quality=insiders)\n\nFor manual installation, you can configure the MCP server using one of these methods:\n\n**Method 1: User Configuration (Recommended)**\nAdd the configuration to your user-level MCP configuration file. Open the Command Palette (`Ctrl + Shift + P`) and run `MCP: Open User Configuration`. This will open your user `mcp.json` file where you can add the server configuration.\n\n**Method 2: Workspace Configuration**\nAlternatively, you can add the configuration to a file called `.vscode/mcp.json` in your workspace. 
This will allow you to share the configuration with others.\n\n> For more details about MCP configuration in VS Code, see the [official VS Code MCP documentation](https://code.visualstudio.com/docs/copilot/customization/mcp-servers).\n\n#### NPX\n\n```json\n{\n  \"servers\": {\n    \"everything\": {\n      \"command\": \"npx\",\n      \"args\": [\"-y\", \"@modelcontextprotocol/server-everything\"]\n    }\n  }\n}\n```\n\n## Running from source with [HTTP+SSE Transport](https://modelcontextprotocol.io/specification/2024-11-05/basic/transports#http-with-sse) (deprecated as of [2025-03-26](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports))\n\n```shell\ncd src/everything\nnpm install\nnpm run start:sse\n```\n\n## Run from source with [Streamable HTTP Transport](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports#streamable-http)\n\n```shell\ncd src/everything\nnpm install\nnpm run start:streamableHttp\n```\n\n## Running as an installed package\n### Install \n```shell\nnpm install -g @modelcontextprotocol/server-everything@latest\n```\n\n### Run the default (stdio) server\n```shell\nnpx @modelcontextprotocol/server-everything\n```\n\n### Or specify stdio explicitly\n```shell\nnpx @modelcontextprotocol/server-everything stdio\n```\n\n### Run the SSE server\n```shell\nnpx @modelcontextprotocol/server-everything sse\n```\n\n### Run the streamable HTTP server\n```shell\nnpx @modelcontextprotocol/server-everything streamableHttp\n```\n\n"
  },
  {
    "path": "src/everything/__tests__/prompts.test.ts",
    "content": "import { describe, it, expect, vi } from 'vitest';\nimport { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';\nimport { registerSimplePrompt } from '../prompts/simple.js';\nimport { registerArgumentsPrompt } from '../prompts/args.js';\nimport { registerPromptWithCompletions } from '../prompts/completions.js';\nimport { registerEmbeddedResourcePrompt } from '../prompts/resource.js';\n\n// Helper to capture registered prompt handlers\nfunction createMockServer() {\n  const handlers: Map<string, Function> = new Map();\n  const configs: Map<string, any> = new Map();\n\n  const mockServer = {\n    registerPrompt: vi.fn((name: string, config: any, handler: Function) => {\n      handlers.set(name, handler);\n      configs.set(name, config);\n    }),\n  } as unknown as McpServer;\n\n  return { mockServer, handlers, configs };\n}\n\ndescribe('Prompts', () => {\n  describe('simple-prompt', () => {\n    it('should return fixed message with no arguments', () => {\n      const { mockServer, handlers } = createMockServer();\n      registerSimplePrompt(mockServer);\n\n      const handler = handlers.get('simple-prompt')!;\n      const result = handler();\n\n      expect(result).toEqual({\n        messages: [\n          {\n            role: 'user',\n            content: {\n              type: 'text',\n              text: 'This is a simple prompt without arguments.',\n            },\n          },\n        ],\n      });\n    });\n  });\n\n  describe('args-prompt', () => {\n    it('should include city in message', () => {\n      const { mockServer, handlers } = createMockServer();\n      registerArgumentsPrompt(mockServer);\n\n      const handler = handlers.get('args-prompt')!;\n      const result = handler({ city: 'San Francisco' });\n\n      expect(result.messages[0].content.text).toBe(\"What's weather in San Francisco?\");\n    });\n\n    it('should include city and state in message', () => {\n      const { mockServer, handlers } = createMockServer();\n   
   registerArgumentsPrompt(mockServer);\n\n      const handler = handlers.get('args-prompt')!;\n      const result = handler({ city: 'San Francisco', state: 'California' });\n\n      expect(result.messages[0].content.text).toBe(\n        \"What's weather in San Francisco, California?\"\n      );\n    });\n\n    it('should handle city only (optional state omitted)', () => {\n      const { mockServer, handlers } = createMockServer();\n      registerArgumentsPrompt(mockServer);\n\n      const handler = handlers.get('args-prompt')!;\n      const result = handler({ city: 'New York' });\n\n      expect(result.messages[0].content.text).toBe(\"What's weather in New York?\");\n      expect(result.messages[0].content.text).not.toContain(',');\n      expect(result.messages[0].role).toBe('user');\n      expect(result.messages[0].content.type).toBe('text');\n    });\n  });\n\n  describe('completable-prompt', () => {\n    it('should generate promotion message with department and name', () => {\n      const { mockServer, handlers } = createMockServer();\n      registerPromptWithCompletions(mockServer);\n\n      const handler = handlers.get('completable-prompt')!;\n      const result = handler({ department: 'Engineering', name: 'Alice' });\n\n      expect(result.messages[0].content.text).toBe(\n        'Please promote Alice to the head of the Engineering team.'\n      );\n    });\n\n    it('should work with different departments', () => {\n      const { mockServer, handlers } = createMockServer();\n      registerPromptWithCompletions(mockServer);\n\n      const handler = handlers.get('completable-prompt')!;\n\n      const salesResult = handler({ department: 'Sales', name: 'David' });\n      expect(salesResult.messages[0].content.text).toContain('Sales');\n      expect(salesResult.messages[0].content.text).toContain('David');\n      expect(salesResult.messages[0].role).toBe('user');\n\n      const marketingResult = handler({ department: 'Marketing', name: 'Grace' });\n      
expect(marketingResult.messages[0].content.text).toContain('Marketing');\n      expect(marketingResult.messages[0].content.text).toContain('Grace');\n    });\n  });\n\n  describe('resource-prompt', () => {\n    it('should return text resource reference', () => {\n      const { mockServer, handlers } = createMockServer();\n      registerEmbeddedResourcePrompt(mockServer);\n\n      const handler = handlers.get('resource-prompt')!;\n      const result = handler({ resourceType: 'Text', resourceId: '1' });\n\n      expect(result.messages).toHaveLength(2);\n      expect(result.messages[0].content.text).toContain('Text');\n      expect(result.messages[0].content.text).toContain('1');\n      expect(result.messages[1].content.type).toBe('resource');\n      expect(result.messages[1].content.resource.uri).toContain('text/1');\n    });\n\n    it('should return blob resource reference', () => {\n      const { mockServer, handlers } = createMockServer();\n      registerEmbeddedResourcePrompt(mockServer);\n\n      const handler = handlers.get('resource-prompt')!;\n      const result = handler({ resourceType: 'Blob', resourceId: '5' });\n\n      expect(result.messages[0].content.text).toContain('Blob');\n      expect(result.messages[1].content.resource.uri).toContain('blob/5');\n    });\n\n    it('should reject invalid resource type', () => {\n      const { mockServer, handlers } = createMockServer();\n      registerEmbeddedResourcePrompt(mockServer);\n\n      const handler = handlers.get('resource-prompt')!;\n      expect(() => handler({ resourceType: 'Invalid', resourceId: '1' })).toThrow(\n        'Invalid resourceType'\n      );\n    });\n\n    it('should reject invalid resource ID', () => {\n      const { mockServer, handlers } = createMockServer();\n      registerEmbeddedResourcePrompt(mockServer);\n\n      const handler = handlers.get('resource-prompt')!;\n      expect(() => handler({ resourceType: 'Text', resourceId: '-1' })).toThrow(\n        'Invalid resourceId'\n      
);\n      expect(() => handler({ resourceType: 'Text', resourceId: '0' })).toThrow(\n        'Invalid resourceId'\n      );\n      expect(() => handler({ resourceType: 'Text', resourceId: 'abc' })).toThrow(\n        'Invalid resourceId'\n      );\n    });\n\n    it('should include both intro text and resource messages', () => {\n      const { mockServer, handlers } = createMockServer();\n      registerEmbeddedResourcePrompt(mockServer);\n\n      const handler = handlers.get('resource-prompt')!;\n      const result = handler({ resourceType: 'Text', resourceId: '3' });\n\n      expect(result.messages).toHaveLength(2);\n      expect(result.messages[0].role).toBe('user');\n      expect(result.messages[0].content.type).toBe('text');\n      expect(result.messages[1].role).toBe('user');\n      expect(result.messages[1].content.type).toBe('resource');\n    });\n  });\n});\n"
  },
  {
    "path": "src/everything/__tests__/registrations.test.ts",
    "content": "import { describe, it, expect, vi } from 'vitest';\nimport { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';\n\n// Create mock server\nfunction createMockServer() {\n  return {\n    registerTool: vi.fn(),\n    registerPrompt: vi.fn(),\n    registerResource: vi.fn(),\n    server: {\n      getClientCapabilities: vi.fn(() => ({})),\n      setRequestHandler: vi.fn(),\n    },\n    sendLoggingMessage: vi.fn(),\n    sendResourceUpdated: vi.fn(),\n  } as unknown as McpServer;\n}\n\ndescribe('Registration Index Files', () => {\n  describe('tools/index.ts', () => {\n    it('should register all standard tools', async () => {\n      const { registerTools } = await import('../tools/index.js');\n      const mockServer = createMockServer();\n\n      registerTools(mockServer);\n\n      // Should register 12 standard tools (non-conditional)\n      expect(mockServer.registerTool).toHaveBeenCalledTimes(12);\n\n      // Verify specific tools are registered\n      const registeredTools = (mockServer.registerTool as any).mock.calls.map(\n        (call: any[]) => call[0]\n      );\n      expect(registeredTools).toContain('echo');\n      expect(registeredTools).toContain('get-sum');\n      expect(registeredTools).toContain('get-env');\n      expect(registeredTools).toContain('get-tiny-image');\n      expect(registeredTools).toContain('get-structured-content');\n      expect(registeredTools).toContain('get-annotated-message');\n      expect(registeredTools).toContain('trigger-long-running-operation');\n      expect(registeredTools).toContain('get-resource-links');\n      expect(registeredTools).toContain('get-resource-reference');\n      expect(registeredTools).toContain('gzip-file-as-resource');\n      expect(registeredTools).toContain('toggle-simulated-logging');\n      expect(registeredTools).toContain('toggle-subscriber-updates');\n    });\n\n    it('should register conditional tools based on capabilities', async () => {\n      const { 
registerConditionalTools } = await import('../tools/index.js');\n\n      // Server with all capabilities including experimental tasks API\n      const mockServerWithCapabilities = {\n        registerTool: vi.fn(),\n        server: {\n          getClientCapabilities: vi.fn(() => ({\n            roots: {},\n            elicitation: {},\n            sampling: {},\n          })),\n        },\n        experimental: {\n          tasks: {\n            registerToolTask: vi.fn(),\n          },\n        },\n      } as unknown as McpServer;\n\n      registerConditionalTools(mockServerWithCapabilities);\n\n      // Should register 3 conditional tools + 3 task-based tools when all capabilities present\n      expect(mockServerWithCapabilities.registerTool).toHaveBeenCalledTimes(3);\n\n      const registeredTools = (\n        mockServerWithCapabilities.registerTool as any\n      ).mock.calls.map((call: any[]) => call[0]);\n      expect(registeredTools).toContain('get-roots-list');\n      expect(registeredTools).toContain('trigger-elicitation-request');\n      expect(registeredTools).toContain('trigger-sampling-request');\n\n      // Task-based tools are registered via experimental.tasks.registerToolTask\n      expect(mockServerWithCapabilities.experimental.tasks.registerToolTask).toHaveBeenCalled();\n    });\n\n    it('should not register conditional tools when capabilities missing', async () => {\n      const { registerConditionalTools } = await import('../tools/index.js');\n\n      const mockServerNoCapabilities = {\n        registerTool: vi.fn(),\n        server: {\n          getClientCapabilities: vi.fn(() => ({})),\n        },\n        experimental: {\n          tasks: {\n            registerToolTask: vi.fn(),\n          },\n        },\n      } as unknown as McpServer;\n\n      registerConditionalTools(mockServerNoCapabilities);\n\n      // Should not register any capability-gated tools when capabilities are missing\n      
expect(mockServerNoCapabilities.registerTool).not.toHaveBeenCalled();\n    });\n  });\n\n  describe('prompts/index.ts', () => {\n    it('should register all prompts', async () => {\n      const { registerPrompts } = await import('../prompts/index.js');\n      const mockServer = createMockServer();\n\n      registerPrompts(mockServer);\n\n      // Should register 4 prompts\n      expect(mockServer.registerPrompt).toHaveBeenCalledTimes(4);\n\n      const registeredPrompts = (mockServer.registerPrompt as any).mock.calls.map(\n        (call: any[]) => call[0]\n      );\n      expect(registeredPrompts).toContain('simple-prompt');\n      expect(registeredPrompts).toContain('args-prompt');\n      expect(registeredPrompts).toContain('completable-prompt');\n      expect(registeredPrompts).toContain('resource-prompt');\n    });\n  });\n\n  describe('resources/index.ts', () => {\n    it('should register resource templates', async () => {\n      const { registerResources } = await import('../resources/index.js');\n      const mockServer = createMockServer();\n\n      registerResources(mockServer);\n\n      // Should register at least the 2 resource templates (text and blob) plus file resources\n      expect(mockServer.registerResource).toHaveBeenCalled();\n      const registeredResources = (mockServer.registerResource as any).mock.calls.map(\n        (call: any[]) => call[0]\n      );\n      expect(registeredResources).toContain('Dynamic Text Resource');\n      expect(registeredResources).toContain('Dynamic Blob Resource');\n    });\n\n    it('should read instructions from file', async () => {\n      const { readInstructions } = await import('../resources/index.js');\n\n      const instructions = readInstructions();\n\n      // Should return a string (either content or error message)\n      expect(typeof instructions).toBe('string');\n      expect(instructions.length).toBeGreaterThan(0);\n    });\n  });\n});\n"
  },
  {
    "path": "src/everything/__tests__/resources.test.ts",
    "content": "import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';\nimport { McpServer, ResourceTemplate } from '@modelcontextprotocol/sdk/server/mcp.js';\nimport {\n  textResource,\n  blobResource,\n  textResourceUri,\n  blobResourceUri,\n  RESOURCE_TYPE_TEXT,\n  RESOURCE_TYPE_BLOB,\n  RESOURCE_TYPES,\n  resourceTypeCompleter,\n  resourceIdForPromptCompleter,\n  resourceIdForResourceTemplateCompleter,\n  registerResourceTemplates,\n} from '../resources/templates.js';\nimport {\n  getSessionResourceURI,\n  registerSessionResource,\n} from '../resources/session.js';\nimport { registerFileResources } from '../resources/files.js';\nimport {\n  setSubscriptionHandlers,\n  beginSimulatedResourceUpdates,\n  stopSimulatedResourceUpdates,\n} from '../resources/subscriptions.js';\n\ndescribe('Resource Templates', () => {\n  describe('Constants', () => {\n    it('should include both types in RESOURCE_TYPES array', () => {\n      expect(RESOURCE_TYPES).toContain(RESOURCE_TYPE_TEXT);\n      expect(RESOURCE_TYPES).toContain(RESOURCE_TYPE_BLOB);\n      expect(RESOURCE_TYPES).toHaveLength(2);\n    });\n  });\n\n  describe('textResourceUri', () => {\n    it('should create URL for text resource', () => {\n      const uri = textResourceUri(1);\n      expect(uri.toString()).toBe('demo://resource/dynamic/text/1');\n    });\n\n    it('should handle different resource IDs', () => {\n      expect(textResourceUri(5).toString()).toBe('demo://resource/dynamic/text/5');\n      expect(textResourceUri(100).toString()).toBe('demo://resource/dynamic/text/100');\n    });\n  });\n\n  describe('blobResourceUri', () => {\n    it('should create URL for blob resource', () => {\n      const uri = blobResourceUri(1);\n      expect(uri.toString()).toBe('demo://resource/dynamic/blob/1');\n    });\n\n    it('should handle different resource IDs', () => {\n      expect(blobResourceUri(5).toString()).toBe('demo://resource/dynamic/blob/5');\n      
expect(blobResourceUri(100).toString()).toBe('demo://resource/dynamic/blob/100');\n    });\n  });\n\n  describe('textResource', () => {\n    it('should create text resource with correct structure', () => {\n      const uri = textResourceUri(1);\n      const resource = textResource(uri, 1);\n\n      expect(resource.uri).toBe(uri.toString());\n      expect(resource.mimeType).toBe('text/plain');\n      expect(resource.text).toContain('Resource 1');\n      expect(resource.text).toContain('plaintext');\n    });\n\n    it('should include timestamp in content', () => {\n      const uri = textResourceUri(2);\n      const resource = textResource(uri, 2);\n\n      // Timestamp format varies, just check it contains time-related content\n      expect(resource.text).toMatch(/\\d/);\n    });\n  });\n\n  describe('blobResource', () => {\n    it('should create blob resource with correct structure', () => {\n      const uri = blobResourceUri(1);\n      const resource = blobResource(uri, 1);\n\n      expect(resource.uri).toBe(uri.toString());\n      expect(resource.mimeType).toBe('text/plain');\n      expect(resource.blob).toBeDefined();\n    });\n\n    it('should create valid base64 encoded content', () => {\n      const uri = blobResourceUri(3);\n      const resource = blobResource(uri, 3);\n\n      // Decode and verify content\n      const decoded = Buffer.from(resource.blob, 'base64').toString();\n      expect(decoded).toContain('Resource 3');\n      expect(decoded).toContain('base64 blob');\n    });\n  });\n\n  describe('resourceTypeCompleter', () => {\n    it('should be defined as a completable schema', () => {\n      // The completer is a zod schema wrapped with completable\n      expect(resourceTypeCompleter).toBeDefined();\n      // It should have the zod parse method\n      expect(typeof (resourceTypeCompleter as any).parse).toBe('function');\n    });\n\n    it('should validate string resource types', () => {\n      // Test that valid strings pass validation\n      
expect(() => (resourceTypeCompleter as any).parse('Text')).not.toThrow();\n      expect(() => (resourceTypeCompleter as any).parse('Blob')).not.toThrow();\n    });\n  });\n\n  describe('resourceIdForPromptCompleter', () => {\n    it('should be defined as a completable schema', () => {\n      expect(resourceIdForPromptCompleter).toBeDefined();\n      expect(typeof (resourceIdForPromptCompleter as any).parse).toBe('function');\n    });\n\n    it('should validate string IDs', () => {\n      // Test that valid strings pass validation\n      expect(() => (resourceIdForPromptCompleter as any).parse('1')).not.toThrow();\n      expect(() => (resourceIdForPromptCompleter as any).parse('100')).not.toThrow();\n    });\n  });\n\n  describe('resourceIdForResourceTemplateCompleter', () => {\n    it('should validate positive integer IDs', () => {\n      expect(resourceIdForResourceTemplateCompleter('1')).toEqual(['1']);\n      expect(resourceIdForResourceTemplateCompleter('50')).toEqual(['50']);\n    });\n\n    it('should reject invalid IDs', () => {\n      expect(resourceIdForResourceTemplateCompleter('0')).toEqual([]);\n      expect(resourceIdForResourceTemplateCompleter('-5')).toEqual([]);\n      expect(resourceIdForResourceTemplateCompleter('not-a-number')).toEqual([]);\n    });\n  });\n\n  describe('registerResourceTemplates', () => {\n    it('should register text and blob resource templates', () => {\n      const registeredResources: any[] = [];\n\n      const mockServer = {\n        registerResource: vi.fn((...args) => {\n          registeredResources.push(args);\n        }),\n      } as unknown as McpServer;\n\n      registerResourceTemplates(mockServer);\n\n      expect(mockServer.registerResource).toHaveBeenCalledTimes(2);\n\n      // Check text resource registration\n      const textRegistration = registeredResources.find((r) =>\n        r[0].includes('Text')\n      );\n      expect(textRegistration).toBeDefined();\n      
expect(textRegistration[1]).toBeInstanceOf(ResourceTemplate);\n\n      // Check blob resource registration\n      const blobRegistration = registeredResources.find((r) =>\n        r[0].includes('Blob')\n      );\n      expect(blobRegistration).toBeDefined();\n    });\n  });\n});\n\ndescribe('Session Resources', () => {\n  describe('getSessionResourceURI', () => {\n    it('should generate correct URI for resource name', () => {\n      expect(getSessionResourceURI('test')).toBe('demo://resource/session/test');\n    });\n\n    it('should handle various resource names', () => {\n      expect(getSessionResourceURI('my-file')).toBe('demo://resource/session/my-file');\n      expect(getSessionResourceURI('document_123')).toBe(\n        'demo://resource/session/document_123'\n      );\n    });\n  });\n\n  describe('registerSessionResource', () => {\n    it('should register text resource and return resource link', () => {\n      const registrations: any[] = [];\n      const mockServer = {\n        registerResource: vi.fn((...args) => {\n          registrations.push(args);\n        }),\n      } as unknown as McpServer;\n\n      const resource = {\n        uri: 'demo://resource/session/test-file',\n        name: 'test-file',\n        mimeType: 'text/plain',\n        description: 'A test file',\n      };\n\n      const result = registerSessionResource(\n        mockServer,\n        resource,\n        'text',\n        'Hello, World!'\n      );\n\n      expect(result.type).toBe('resource_link');\n      expect(result.uri).toBe(resource.uri);\n      expect(result.name).toBe(resource.name);\n\n      expect(mockServer.registerResource).toHaveBeenCalledWith(\n        'test-file',\n        'demo://resource/session/test-file',\n        expect.objectContaining({\n          mimeType: 'text/plain',\n          description: 'A test file',\n        }),\n        expect.any(Function)\n      );\n    });\n\n    it('should register blob resource correctly', () => {\n      const mockServer = {\n    
    registerResource: vi.fn(),\n      } as unknown as McpServer;\n\n      const resource = {\n        uri: 'demo://resource/session/binary-file',\n        name: 'binary-file',\n        mimeType: 'application/octet-stream',\n      };\n\n      const blobContent = Buffer.from('binary data').toString('base64');\n      const result = registerSessionResource(mockServer, resource, 'blob', blobContent);\n\n      expect(result.type).toBe('resource_link');\n      expect(mockServer.registerResource).toHaveBeenCalled();\n    });\n\n    it('should return resource handler that provides correct content', async () => {\n      let capturedHandler: Function | null = null;\n      const mockServer = {\n        registerResource: vi.fn((_name, _uri, _config, handler) => {\n          capturedHandler = handler;\n        }),\n      } as unknown as McpServer;\n\n      const resource = {\n        uri: 'demo://resource/session/content-test',\n        name: 'content-test',\n        mimeType: 'text/plain',\n      };\n\n      registerSessionResource(mockServer, resource, 'text', 'Test content here');\n\n      expect(capturedHandler).not.toBeNull();\n\n      const handlerResult = await capturedHandler!(new URL(resource.uri));\n      expect(handlerResult.contents).toHaveLength(1);\n      expect(handlerResult.contents[0].text).toBe('Test content here');\n      expect(handlerResult.contents[0].mimeType).toBe('text/plain');\n    });\n  });\n});\n\ndescribe('File Resources', () => {\n  describe('registerFileResources', () => {\n    it('should register file resources when docs directory exists', () => {\n      const mockServer = {\n        registerResource: vi.fn(),\n      } as unknown as McpServer;\n\n      registerFileResources(mockServer);\n\n      // The docs folder exists in the everything server and contains files\n      // so registerResource should have been called\n      expect(mockServer.registerResource).toHaveBeenCalled();\n    });\n  });\n});\n\ndescribe('Subscriptions', () => {\n  
describe('setSubscriptionHandlers', () => {\n    it('should set request handlers on server', () => {\n      const mockServer = {\n        server: {\n          setRequestHandler: vi.fn(),\n        },\n        sendLoggingMessage: vi.fn(),\n      } as unknown as McpServer;\n\n      setSubscriptionHandlers(mockServer);\n\n      // Should set both subscribe and unsubscribe handlers\n      expect(mockServer.server.setRequestHandler).toHaveBeenCalledTimes(2);\n    });\n  });\n\n  describe('simulated resource updates lifecycle', () => {\n    afterEach(() => {\n      // Clean up any intervals\n      stopSimulatedResourceUpdates('lifecycle-test-session');\n    });\n\n    it('should start and stop updates without errors', () => {\n      const mockServer = {\n        server: {\n          notification: vi.fn(),\n        },\n      } as unknown as McpServer;\n\n      // Start updates - should work for both defined and undefined sessionId\n      beginSimulatedResourceUpdates(mockServer, 'lifecycle-test-session');\n      beginSimulatedResourceUpdates(mockServer, undefined);\n\n      // Stop updates - should handle all cases gracefully\n      stopSimulatedResourceUpdates('lifecycle-test-session');\n      stopSimulatedResourceUpdates('non-existent-session');\n      stopSimulatedResourceUpdates(undefined);\n\n      // If we got here without throwing, the lifecycle works correctly\n      expect(true).toBe(true);\n    });\n  });\n});\n"
  },
  {
    "path": "src/everything/__tests__/server.test.ts",
    "content": "import { describe, it, expect, vi } from 'vitest';\nimport { createServer } from '../server/index.js';\n\ndescribe('Server Factory', () => {\n  describe('createServer', () => {\n    it('should return a ServerFactoryResponse object', () => {\n      const result = createServer();\n\n      expect(result).toHaveProperty('server');\n      expect(result).toHaveProperty('cleanup');\n    });\n\n    it('should return a cleanup function', () => {\n      const { cleanup } = createServer();\n\n      expect(typeof cleanup).toBe('function');\n    });\n\n    it('should create an McpServer instance', () => {\n      const { server } = createServer();\n\n      expect(server).toBeDefined();\n      expect(server.server).toBeDefined();\n    });\n\n    it('should have an oninitialized handler set', () => {\n      const { server } = createServer();\n\n      expect(server.server.oninitialized).toBeDefined();\n    });\n\n    it('should allow multiple servers to be created', () => {\n      const result1 = createServer();\n      const result2 = createServer();\n\n      expect(result1.server).toBeDefined();\n      expect(result2.server).toBeDefined();\n      expect(result1.server).not.toBe(result2.server);\n    });\n  });\n});\n"
  },
  {
    "path": "src/everything/__tests__/tools.test.ts",
    "content": "import { describe, it, expect, vi } from 'vitest';\nimport { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';\nimport { registerEchoTool, EchoSchema } from '../tools/echo.js';\nimport { registerGetSumTool } from '../tools/get-sum.js';\nimport { registerGetEnvTool } from '../tools/get-env.js';\nimport { registerGetTinyImageTool, MCP_TINY_IMAGE } from '../tools/get-tiny-image.js';\nimport { registerGetStructuredContentTool } from '../tools/get-structured-content.js';\nimport { registerGetAnnotatedMessageTool } from '../tools/get-annotated-message.js';\nimport { registerTriggerLongRunningOperationTool } from '../tools/trigger-long-running-operation.js';\nimport { registerGetResourceLinksTool } from '../tools/get-resource-links.js';\nimport { registerGetResourceReferenceTool } from '../tools/get-resource-reference.js';\nimport { registerToggleSimulatedLoggingTool } from '../tools/toggle-simulated-logging.js';\nimport { registerToggleSubscriberUpdatesTool } from '../tools/toggle-subscriber-updates.js';\nimport { registerTriggerSamplingRequestTool } from '../tools/trigger-sampling-request.js';\nimport { registerTriggerElicitationRequestTool } from '../tools/trigger-elicitation-request.js';\nimport { registerGetRootsListTool } from '../tools/get-roots-list.js';\nimport { registerGZipFileAsResourceTool } from '../tools/gzip-file-as-resource.js';\n\n// Helper to capture registered tool handlers\nfunction createMockServer() {\n  const handlers: Map<string, Function> = new Map();\n  const configs: Map<string, any> = new Map();\n\n  const mockServer = {\n    registerTool: vi.fn((name: string, config: any, handler: Function) => {\n      handlers.set(name, handler);\n      configs.set(name, config);\n    }),\n    server: {\n      getClientCapabilities: vi.fn(() => ({})),\n      notification: vi.fn(),\n    },\n    sendLoggingMessage: vi.fn(),\n    sendResourceUpdated: vi.fn(),\n  } as unknown as McpServer;\n\n  return { mockServer, handlers, configs 
};\n}\n\ndescribe('Tools', () => {\n  describe('echo', () => {\n    it('should echo back the message', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerEchoTool(mockServer);\n\n      const handler = handlers.get('echo')!;\n      const result = await handler({ message: 'Hello, World!' });\n\n      expect(result).toEqual({\n        content: [{ type: 'text', text: 'Echo: Hello, World!' }],\n      });\n    });\n\n    it('should handle empty message', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerEchoTool(mockServer);\n\n      const handler = handlers.get('echo')!;\n      const result = await handler({ message: '' });\n\n      expect(result).toEqual({\n        content: [{ type: 'text', text: 'Echo: ' }],\n      });\n    });\n\n    it('should reject invalid input', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerEchoTool(mockServer);\n\n      const handler = handlers.get('echo')!;\n\n      await expect(handler({})).rejects.toThrow();\n      await expect(handler({ message: 123 })).rejects.toThrow();\n    });\n  });\n\n  describe('EchoSchema', () => {\n    it('should validate correct input', () => {\n      const result = EchoSchema.parse({ message: 'test' });\n      expect(result).toEqual({ message: 'test' });\n    });\n\n    it('should reject missing message', () => {\n      expect(() => EchoSchema.parse({})).toThrow();\n    });\n\n    it('should reject non-string message', () => {\n      expect(() => EchoSchema.parse({ message: 123 })).toThrow();\n    });\n  });\n\n  describe('get-sum', () => {\n    it('should calculate sum of two positive numbers', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetSumTool(mockServer);\n\n      const handler = handlers.get('get-sum')!;\n      const result = await handler({ a: 5, b: 3 });\n\n      expect(result).toEqual({\n        content: [{ type: 'text', text: 'The sum of 5 
and 3 is 8.' }],\n      });\n    });\n\n    it('should calculate sum with negative numbers', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetSumTool(mockServer);\n\n      const handler = handlers.get('get-sum')!;\n      const result = await handler({ a: -5, b: 3 });\n\n      expect(result).toEqual({\n        content: [{ type: 'text', text: 'The sum of -5 and 3 is -2.' }],\n      });\n    });\n\n    it('should calculate sum with zero', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetSumTool(mockServer);\n\n      const handler = handlers.get('get-sum')!;\n      const result = await handler({ a: 0, b: 0 });\n\n      expect(result).toEqual({\n        content: [{ type: 'text', text: 'The sum of 0 and 0 is 0.' }],\n      });\n    });\n\n    it('should handle floating point numbers', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetSumTool(mockServer);\n\n      const handler = handlers.get('get-sum')!;\n      const result = await handler({ a: 1.5, b: 2.5 });\n\n      expect(result).toEqual({\n        content: [{ type: 'text', text: 'The sum of 1.5 and 2.5 is 4.' 
}],\n      });\n    });\n\n    it('should reject invalid input', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetSumTool(mockServer);\n\n      const handler = handlers.get('get-sum')!;\n\n      await expect(handler({})).rejects.toThrow();\n      await expect(handler({ a: 'not a number', b: 5 })).rejects.toThrow();\n      await expect(handler({ a: 5 })).rejects.toThrow();\n    });\n  });\n\n  describe('get-env', () => {\n    it('should return all environment variables as JSON', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetEnvTool(mockServer);\n\n      const handler = handlers.get('get-env')!;\n      process.env.TEST_VAR_EVERYTHING = 'test_value';\n      const result = await handler({});\n\n      expect(result.content).toHaveLength(1);\n      expect(result.content[0].type).toBe('text');\n\n      const envJson = JSON.parse(result.content[0].text);\n      expect(envJson.TEST_VAR_EVERYTHING).toBe('test_value');\n\n      delete process.env.TEST_VAR_EVERYTHING;\n    });\n\n    it('should return valid JSON', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetEnvTool(mockServer);\n\n      const handler = handlers.get('get-env')!;\n      const result = await handler({});\n\n      expect(() => JSON.parse(result.content[0].text)).not.toThrow();\n    });\n  });\n\n  describe('get-tiny-image', () => {\n    it('should return image content with text descriptions', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetTinyImageTool(mockServer);\n\n      const handler = handlers.get('get-tiny-image')!;\n      const result = await handler({});\n\n      expect(result.content).toHaveLength(3);\n      expect(result.content[0]).toEqual({\n        type: 'text',\n        text: \"Here's the image you requested:\",\n      });\n      expect(result.content[1]).toEqual({\n        type: 'image',\n        data: MCP_TINY_IMAGE,\n  
      mimeType: 'image/png',\n      });\n      expect(result.content[2]).toEqual({\n        type: 'text',\n        text: 'The image above is the MCP logo.',\n      });\n    });\n\n    it('should return valid base64 image data', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetTinyImageTool(mockServer);\n\n      const handler = handlers.get('get-tiny-image')!;\n      const result = await handler({});\n\n      const imageContent = result.content[1];\n      expect(imageContent.type).toBe('image');\n      expect(imageContent.mimeType).toBe('image/png');\n      // Verify it's valid base64\n      expect(() => Buffer.from(imageContent.data, 'base64')).not.toThrow();\n    });\n  });\n\n  describe('get-structured-content', () => {\n    it('should return weather for New York', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetStructuredContentTool(mockServer);\n\n      const handler = handlers.get('get-structured-content')!;\n      const result = await handler({ location: 'New York' });\n\n      expect(result.structuredContent).toEqual({\n        temperature: 33,\n        conditions: 'Cloudy',\n        humidity: 82,\n      });\n      expect(result.content[0].type).toBe('text');\n      expect(JSON.parse(result.content[0].text)).toEqual(result.structuredContent);\n    });\n\n    it('should return weather for Chicago', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetStructuredContentTool(mockServer);\n\n      const handler = handlers.get('get-structured-content')!;\n      const result = await handler({ location: 'Chicago' });\n\n      expect(result.structuredContent).toEqual({\n        temperature: 36,\n        conditions: 'Light rain / drizzle',\n        humidity: 82,\n      });\n    });\n\n    it('should return weather for Los Angeles', async () => {\n      const { mockServer, handlers } = createMockServer();\n      
registerGetStructuredContentTool(mockServer);\n\n      const handler = handlers.get('get-structured-content')!;\n      const result = await handler({ location: 'Los Angeles' });\n\n      expect(result.structuredContent).toEqual({\n        temperature: 73,\n        conditions: 'Sunny / Clear',\n        humidity: 48,\n      });\n    });\n  });\n\n  describe('get-annotated-message', () => {\n    it('should return error message with high priority', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetAnnotatedMessageTool(mockServer);\n\n      const handler = handlers.get('get-annotated-message')!;\n      const result = await handler({ messageType: 'error', includeImage: false });\n\n      expect(result.content).toHaveLength(1);\n      expect(result.content[0].text).toBe('Error: Operation failed');\n      expect(result.content[0].annotations).toEqual({\n        priority: 1.0,\n        audience: ['user', 'assistant'],\n      });\n    });\n\n    it('should return success message with medium priority', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetAnnotatedMessageTool(mockServer);\n\n      const handler = handlers.get('get-annotated-message')!;\n      const result = await handler({ messageType: 'success', includeImage: false });\n\n      expect(result.content[0].text).toBe('Operation completed successfully');\n      expect(result.content[0].annotations.priority).toBe(0.7);\n      expect(result.content[0].annotations.audience).toEqual(['user']);\n    });\n\n    it('should return debug message with low priority', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetAnnotatedMessageTool(mockServer);\n\n      const handler = handlers.get('get-annotated-message')!;\n      const result = await handler({ messageType: 'debug', includeImage: false });\n\n      expect(result.content[0].text).toContain('Debug:');\n      
expect(result.content[0].annotations.priority).toBe(0.3);\n      expect(result.content[0].annotations.audience).toEqual(['assistant']);\n    });\n\n    it('should include annotated image when requested', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetAnnotatedMessageTool(mockServer);\n\n      const handler = handlers.get('get-annotated-message')!;\n      const result = await handler({ messageType: 'success', includeImage: true });\n\n      expect(result.content).toHaveLength(2);\n      expect(result.content[1].type).toBe('image');\n      expect(result.content[1].annotations).toEqual({\n        priority: 0.5,\n        audience: ['user'],\n      });\n    });\n  });\n\n  describe('trigger-long-running-operation', () => {\n    it('should complete operation and return result', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerTriggerLongRunningOperationTool(mockServer);\n\n      const handler = handlers.get('trigger-long-running-operation')!;\n      // Use very short duration for test\n      const result = await handler(\n        { duration: 0.1, steps: 2 },\n        { _meta: {}, requestId: 'test-123' }\n      );\n\n      expect(result.content[0].text).toContain('Long running operation completed');\n      expect(result.content[0].text).toContain('Duration: 0.1 seconds');\n      expect(result.content[0].text).toContain('Steps: 2');\n    }, 10000);\n\n    it('should send progress notifications when progressToken provided', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerTriggerLongRunningOperationTool(mockServer);\n\n      const handler = handlers.get('trigger-long-running-operation')!;\n      await handler(\n        { duration: 0.1, steps: 2 },\n        { _meta: { progressToken: 'token-123' }, requestId: 'test-456', sessionId: 'session-1' }\n      );\n\n      expect(mockServer.server.notification).toHaveBeenCalledTimes(2);\n      
expect(mockServer.server.notification).toHaveBeenCalledWith(\n        expect.objectContaining({\n          method: 'notifications/progress',\n          params: expect.objectContaining({\n            progressToken: 'token-123',\n          }),\n        }),\n        expect.any(Object)\n      );\n    }, 10000);\n  });\n\n  describe('get-resource-links', () => {\n    it('should return specified number of resource links', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetResourceLinksTool(mockServer);\n\n      const handler = handlers.get('get-resource-links')!;\n      const result = await handler({ count: 3 });\n\n      // 1 intro text + 3 resource links\n      expect(result.content).toHaveLength(4);\n      expect(result.content[0].type).toBe('text');\n      expect(result.content[0].text).toContain('3 resource links');\n\n      // Check resource links\n      for (let i = 1; i < 4; i++) {\n        expect(result.content[i].type).toBe('resource_link');\n        expect(result.content[i].uri).toBeDefined();\n        expect(result.content[i].name).toBeDefined();\n      }\n    });\n\n    it('should alternate between text and blob resources', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetResourceLinksTool(mockServer);\n\n      const handler = handlers.get('get-resource-links')!;\n      const result = await handler({ count: 4 });\n\n      // Odd IDs (1, 3) are blob, even IDs (2, 4) are text\n      expect(result.content[1].name).toContain('Blob');\n      expect(result.content[2].name).toContain('Text');\n      expect(result.content[3].name).toContain('Blob');\n      expect(result.content[4].name).toContain('Text');\n    });\n\n    it('should use default count of 3', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetResourceLinksTool(mockServer);\n\n      const handler = handlers.get('get-resource-links')!;\n      const result = await handler({});\n\n   
   // 1 intro text + 3 resource links (default)\n      expect(result.content).toHaveLength(4);\n    });\n  });\n\n  describe('get-resource-reference', () => {\n    it('should return text resource reference', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetResourceReferenceTool(mockServer);\n\n      const handler = handlers.get('get-resource-reference')!;\n      const result = await handler({ resourceType: 'Text', resourceId: 1 });\n\n      expect(result.content).toHaveLength(3);\n      expect(result.content[0].text).toContain('Resource 1');\n      expect(result.content[1].type).toBe('resource');\n      expect(result.content[1].resource.uri).toContain('text/1');\n      expect(result.content[2].text).toContain('URI');\n    });\n\n    it('should return blob resource reference', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetResourceReferenceTool(mockServer);\n\n      const handler = handlers.get('get-resource-reference')!;\n      const result = await handler({ resourceType: 'Blob', resourceId: 5 });\n\n      expect(result.content[1].resource.uri).toContain('blob/5');\n    });\n\n    it('should reject invalid resource type', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetResourceReferenceTool(mockServer);\n\n      const handler = handlers.get('get-resource-reference')!;\n      await expect(handler({ resourceType: 'Invalid', resourceId: 1 })).rejects.toThrow(\n        'Invalid resourceType'\n      );\n    });\n\n    it('should reject invalid resource ID', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerGetResourceReferenceTool(mockServer);\n\n      const handler = handlers.get('get-resource-reference')!;\n      await expect(handler({ resourceType: 'Text', resourceId: -1 })).rejects.toThrow(\n        'Invalid resourceId'\n      );\n      await expect(handler({ resourceType: 'Text', resourceId: 0 
})).rejects.toThrow(\n        'Invalid resourceId'\n      );\n      await expect(handler({ resourceType: 'Text', resourceId: 1.5 })).rejects.toThrow(\n        'Invalid resourceId'\n      );\n    });\n  });\n\n  describe('toggle-simulated-logging', () => {\n    it('should start logging when not active', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerToggleSimulatedLoggingTool(mockServer);\n\n      const handler = handlers.get('toggle-simulated-logging')!;\n      const result = await handler({}, { sessionId: 'test-session-1' });\n\n      expect(result.content[0].text).toContain('Started');\n      expect(result.content[0].text).toContain('test-session-1');\n    });\n\n    it('should stop logging when already active', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerToggleSimulatedLoggingTool(mockServer);\n\n      const handler = handlers.get('toggle-simulated-logging')!;\n\n      // First call starts logging\n      await handler({}, { sessionId: 'test-session-2' });\n\n      // Second call stops logging\n      const result = await handler({}, { sessionId: 'test-session-2' });\n\n      expect(result.content[0].text).toContain('Stopped');\n      expect(result.content[0].text).toContain('test-session-2');\n    });\n\n    it('should handle undefined sessionId', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerToggleSimulatedLoggingTool(mockServer);\n\n      const handler = handlers.get('toggle-simulated-logging')!;\n      const result = await handler({}, {});\n\n      expect(result.content[0].text).toContain('Started');\n    });\n  });\n\n  describe('toggle-subscriber-updates', () => {\n    it('should start updates when not active', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerToggleSubscriberUpdatesTool(mockServer);\n\n      const handler = handlers.get('toggle-subscriber-updates')!;\n      const result = 
await handler({}, { sessionId: 'sub-session-1' });\n\n      expect(result.content[0].text).toContain('Started');\n      expect(result.content[0].text).toContain('sub-session-1');\n    });\n\n    it('should stop updates when already active', async () => {\n      const { mockServer, handlers } = createMockServer();\n      registerToggleSubscriberUpdatesTool(mockServer);\n\n      const handler = handlers.get('toggle-subscriber-updates')!;\n\n      // First call starts updates\n      await handler({}, { sessionId: 'sub-session-2' });\n\n      // Second call stops updates\n      const result = await handler({}, { sessionId: 'sub-session-2' });\n\n      expect(result.content[0].text).toContain('Stopped');\n      expect(result.content[0].text).toContain('sub-session-2');\n    });\n  });\n\n  describe('trigger-sampling-request', () => {\n    it('should not register when client does not support sampling', () => {\n      const { mockServer } = createMockServer();\n      registerTriggerSamplingRequestTool(mockServer);\n\n      // Tool should not be registered since mock server returns empty capabilities\n      expect(mockServer.registerTool).not.toHaveBeenCalled();\n    });\n\n    it('should register when client supports sampling', () => {\n      const handlers: Map<string, Function> = new Map();\n      const mockServer = {\n        registerTool: vi.fn((name: string, config: any, handler: Function) => {\n          handlers.set(name, handler);\n        }),\n        server: {\n          getClientCapabilities: vi.fn(() => ({ sampling: {} })),\n        },\n      } as unknown as McpServer;\n\n      registerTriggerSamplingRequestTool(mockServer);\n\n      expect(mockServer.registerTool).toHaveBeenCalledWith(\n        'trigger-sampling-request',\n        expect.objectContaining({\n          title: 'Trigger Sampling Request Tool',\n          description: expect.stringContaining('Sampling'),\n        }),\n        expect.any(Function)\n      );\n    });\n\n    it('should send sampling 
request and return result', async () => {\n      const handlers: Map<string, Function> = new Map();\n      const mockSendRequest = vi.fn().mockResolvedValue({\n        model: 'test-model',\n        content: { type: 'text', text: 'LLM response' },\n      });\n\n      const mockServer = {\n        registerTool: vi.fn((name: string, config: any, handler: Function) => {\n          handlers.set(name, handler);\n        }),\n        server: {\n          getClientCapabilities: vi.fn(() => ({ sampling: {} })),\n        },\n      } as unknown as McpServer;\n\n      registerTriggerSamplingRequestTool(mockServer);\n\n      const handler = handlers.get('trigger-sampling-request')!;\n      const result = await handler(\n        { prompt: 'Test prompt', maxTokens: 50 },\n        { sendRequest: mockSendRequest }\n      );\n\n      expect(mockSendRequest).toHaveBeenCalledWith(\n        expect.objectContaining({\n          method: 'sampling/createMessage',\n          params: expect.objectContaining({\n            maxTokens: 50,\n          }),\n        }),\n        expect.anything()\n      );\n      expect(result.content[0].text).toContain('LLM sampling result');\n    });\n  });\n\n  describe('trigger-elicitation-request', () => {\n    it('should not register when client does not support elicitation', () => {\n      const { mockServer } = createMockServer();\n      registerTriggerElicitationRequestTool(mockServer);\n\n      expect(mockServer.registerTool).not.toHaveBeenCalled();\n    });\n\n    it('should register when client supports elicitation', () => {\n      const handlers: Map<string, Function> = new Map();\n      const mockServer = {\n        registerTool: vi.fn((name: string, config: any, handler: Function) => {\n          handlers.set(name, handler);\n        }),\n        server: {\n          getClientCapabilities: vi.fn(() => ({ elicitation: {} })),\n        },\n      } as unknown as McpServer;\n\n      registerTriggerElicitationRequestTool(mockServer);\n\n      
expect(mockServer.registerTool).toHaveBeenCalledWith(\n        'trigger-elicitation-request',\n        expect.objectContaining({\n          title: 'Trigger Elicitation Request Tool',\n          description: expect.stringContaining('Elicitation'),\n        }),\n        expect.any(Function)\n      );\n    });\n\n    it('should handle accept action with user content', async () => {\n      const handlers: Map<string, Function> = new Map();\n      const mockSendRequest = vi.fn().mockResolvedValue({\n        action: 'accept',\n        content: {\n          name: 'John Doe',\n          check: true,\n          email: 'john@example.com',\n        },\n      });\n\n      const mockServer = {\n        registerTool: vi.fn((name: string, config: any, handler: Function) => {\n          handlers.set(name, handler);\n        }),\n        server: {\n          getClientCapabilities: vi.fn(() => ({ elicitation: {} })),\n        },\n      } as unknown as McpServer;\n\n      registerTriggerElicitationRequestTool(mockServer);\n\n      const handler = handlers.get('trigger-elicitation-request')!;\n      const result = await handler({}, { sendRequest: mockSendRequest });\n\n      expect(result.content[0].text).toContain('✅');\n      expect(result.content[0].text).toContain('provided');\n      expect(result.content[1].text).toContain('John Doe');\n    });\n\n    it('should handle decline action', async () => {\n      const handlers: Map<string, Function> = new Map();\n      const mockSendRequest = vi.fn().mockResolvedValue({\n        action: 'decline',\n      });\n\n      const mockServer = {\n        registerTool: vi.fn((name: string, config: any, handler: Function) => {\n          handlers.set(name, handler);\n        }),\n        server: {\n          getClientCapabilities: vi.fn(() => ({ elicitation: {} })),\n        },\n      } as unknown as McpServer;\n\n      registerTriggerElicitationRequestTool(mockServer);\n\n      const handler = handlers.get('trigger-elicitation-request')!;\n     
 const result = await handler({}, { sendRequest: mockSendRequest });\n\n      expect(result.content[0].text).toContain('❌');\n      expect(result.content[0].text).toContain('declined');\n    });\n\n    it('should handle cancel action', async () => {\n      const handlers: Map<string, Function> = new Map();\n      const mockSendRequest = vi.fn().mockResolvedValue({\n        action: 'cancel',\n      });\n\n      const mockServer = {\n        registerTool: vi.fn((name: string, config: any, handler: Function) => {\n          handlers.set(name, handler);\n        }),\n        server: {\n          getClientCapabilities: vi.fn(() => ({ elicitation: {} })),\n        },\n      } as unknown as McpServer;\n\n      registerTriggerElicitationRequestTool(mockServer);\n\n      const handler = handlers.get('trigger-elicitation-request')!;\n      const result = await handler({}, { sendRequest: mockSendRequest });\n\n      expect(result.content[0].text).toContain('⚠️');\n      expect(result.content[0].text).toContain('cancelled');\n    });\n  });\n\n  describe('get-roots-list', () => {\n    it('should not register when client does not support roots', () => {\n      const { mockServer } = createMockServer();\n      registerGetRootsListTool(mockServer);\n\n      expect(mockServer.registerTool).not.toHaveBeenCalled();\n    });\n\n    it('should register when client supports roots', () => {\n      const handlers: Map<string, Function> = new Map();\n      const mockServer = {\n        registerTool: vi.fn((name: string, config: any, handler: Function) => {\n          handlers.set(name, handler);\n        }),\n        server: {\n          getClientCapabilities: vi.fn(() => ({ roots: {} })),\n        },\n      } as unknown as McpServer;\n\n      registerGetRootsListTool(mockServer);\n\n      expect(mockServer.registerTool).toHaveBeenCalledWith(\n        'get-roots-list',\n        expect.objectContaining({\n          title: 'Get Roots List Tool',\n          description: 
expect.stringContaining('roots'),\n        }),\n        expect.any(Function)\n      );\n    });\n  });\n\n  describe('gzip-file-as-resource', () => {\n    it('should compress data URI and return resource link', async () => {\n      const registeredResources: any[] = [];\n      const mockServer = {\n        registerTool: vi.fn(),\n        registerResource: vi.fn((...args) => {\n          registeredResources.push(args);\n        }),\n      } as unknown as McpServer;\n\n      // Get the handler\n      let handler: Function | null = null;\n      (mockServer.registerTool as any).mockImplementation(\n        (name: string, config: any, h: Function) => {\n          handler = h;\n        }\n      );\n\n      registerGZipFileAsResourceTool(mockServer);\n\n      // Create a data URI with test content\n      const testContent = 'Hello, World!';\n      const dataUri = `data:text/plain;base64,${Buffer.from(testContent).toString('base64')}`;\n\n      const result = await handler!(\n        { name: 'test.txt.gz', data: dataUri, outputType: 'resourceLink' }\n      );\n\n      expect(result.content[0].type).toBe('resource_link');\n      expect(result.content[0].uri).toContain('test.txt.gz');\n    });\n\n    it('should return resource directly when outputType is resource', async () => {\n      const mockServer = {\n        registerTool: vi.fn(),\n        registerResource: vi.fn(),\n      } as unknown as McpServer;\n\n      let handler: Function | null = null;\n      (mockServer.registerTool as any).mockImplementation(\n        (name: string, config: any, h: Function) => {\n          handler = h;\n        }\n      );\n\n      registerGZipFileAsResourceTool(mockServer);\n\n      const testContent = 'Test content for compression';\n      const dataUri = `data:text/plain;base64,${Buffer.from(testContent).toString('base64')}`;\n\n      const result = await handler!(\n        { name: 'output.gz', data: dataUri, outputType: 'resource' }\n      );\n\n      
expect(result.content[0].type).toBe('resource');\n      expect(result.content[0].resource.mimeType).toBe('application/gzip');\n      expect(result.content[0].resource.blob).toBeDefined();\n    });\n\n    it('should reject unsupported URL protocols', async () => {\n      const mockServer = {\n        registerTool: vi.fn(),\n        registerResource: vi.fn(),\n      } as unknown as McpServer;\n\n      let handler: Function | null = null;\n      (mockServer.registerTool as any).mockImplementation(\n        (name: string, config: any, h: Function) => {\n          handler = h;\n        }\n      );\n\n      registerGZipFileAsResourceTool(mockServer);\n\n      await expect(\n        handler!({ name: 'test.gz', data: 'ftp://example.com/file.txt', outputType: 'resource' })\n      ).rejects.toThrow('Unsupported URL protocol');\n    });\n  });\n});\n"
  },
  {
    "path": "src/everything/docs/architecture.md",
    "content": "# Everything Server – Architecture\n\n**Architecture\n| [Project Structure](structure.md)\n| [Startup Process](startup.md)\n| [Server Features](features.md)\n| [Extension Points](extension.md)\n| [How It Works](how-it-works.md)**\n\nThis documentation summarizes the current layout and runtime architecture of the `src/everything` package.\nIt explains how the server starts, how transports are wired, where tools, prompts, and resources are registered, and how to extend the system.\n\n## High‑level Overview\n\n### Purpose\n\nA minimal, modular MCP server showcasing core Model Context Protocol features. It exposes simple tools, prompts, and resources, and can be run over multiple transports (STDIO, SSE, and Streamable HTTP).\n\n### Design\n\nA small “server factory” constructs the MCP server and registers features.\nTransports are separate entry points that create/connect the server and handle network concerns.\nTools, prompts, and resources are organized in their own submodules.\n\n### Multi‑client\n\nThe server supports multiple concurrent clients. Tracking per session data is demonstrated with\nresource subscriptions and simulated logging.\n\n## Build and Distribution\n\n- TypeScript sources are compiled into `dist/` via `npm run build`.\n- The `build` script copies `docs/` into `dist/` so instruction files ship alongside the compiled server.\n- The CLI bin is configured in `package.json` as `mcp-server-everything` → `dist/index.js`.\n\n## [Project Structure](structure.md)\n\n## [Startup Process](startup.md)\n\n## [Server Features](features.md)\n\n## [Extension Points](extension.md)\n\n## [How It Works](how-it-works.md)\n"
  },
  {
    "path": "src/everything/docs/extension.md",
    "content": "# Everything Server - Extension Points\n\n**[Architecture](architecture.md)\n| [Project Structure](structure.md)\n| [Startup Process](startup.md)\n| [Server Features](features.md)\n| Extension Points\n| [How It Works](how-it-works.md)**\n\n## Adding Tools\n\n- Create a new file under `tools/` with your `registerXTool(server)` function that registers the tool via `server.registerTool(...)`.\n- Export and call it from `tools/index.ts` inside `registerTools(server)`.\n\n## Adding Prompts\n\n- Create a new file under `prompts/` with your `registerXPrompt(server)` function that registers the prompt via `server.registerPrompt(...)`.\n- Export and call it from `prompts/index.ts` inside `registerPrompts(server)`.\n\n## Adding Resources\n\n- Create a new file under `resources/` with your `registerXResources(server)` function using `server.registerResource(...)` (optionally with `ResourceTemplate`).\n- Export and call it from `resources/index.ts` inside `registerResources(server)`.\n"
  },
  {
    "path": "src/everything/docs/features.md",
    "content": "# Everything Server - Features\n\n**[Architecture](architecture.md)\n| [Project Structure](structure.md)\n| [Startup Process](startup.md)\n| Server Features\n| [Extension Points](extension.md)\n| [How It Works](how-it-works.md)**\n\n## Tools\n\n- `echo` (tools/echo.ts): Echoes the provided `message: string`. Uses Zod to validate inputs.\n- `get-annotated-message` (tools/get-annotated-message.ts): Returns a `text` message annotated with `priority` and `audience` based on `messageType` (`error`, `success`, or `debug`); can optionally include an annotated `image`.\n- `get-env` (tools/get-env.ts): Returns all environment variables from the running process as pretty-printed JSON text.\n- `get-resource-links` (tools/get-resource-links.ts): Returns an intro `text` block followed by multiple `resource_link` items. For a requested `count` (1–10), alternates between dynamic Text and Blob resources using URIs from `resources/templates.ts`.\n- `get-resource-reference` (tools/get-resource-reference.ts): Accepts `resourceType` (`text` or `blob`) and `resourceId` (positive integer). Returns a concrete `resource` content block (with its `uri`, `mimeType`, and data) with surrounding explanatory `text`.\n- `get-roots-list` (tools/get-roots-list.ts): Returns the last list of roots sent by the client.\n- `gzip-file-as-resource` (tools/gzip-file-as-resource.ts): Accepts a `name` and `data` (URL or data URI), fetches the data subject to size/time/domain constraints, compresses it, registers it as a session resource at `demo://resource/session/<name>` with `mimeType: application/gzip`, and returns either a `resource_link` (default) or an inline `resource` depending on `outputType`.\n- `get-structured-content` (tools/get-structured-content.ts): Demonstrates structured responses. 
Accepts `location` input and returns both backward‑compatible `content` (a `text` block containing JSON) and `structuredContent` validated by an `outputSchema` (temperature, conditions, humidity).\n- `get-sum` (tools/get-sum.ts): For two numbers `a` and `b` calculates and returns their sum. Uses Zod to validate inputs.\n- `get-tiny-image` (tools/get-tiny-image.ts): Returns a tiny PNG MCP logo as an `image` content item with brief descriptive text before and after.\n- `trigger-long-running-operation` (tools/trigger-long-running-operation.ts): Simulates a multi-step operation over a given `duration` and number of `steps`; reports progress via `notifications/progress` when a `progressToken` is provided by the client.\n- `toggle-simulated-logging` (tools/toggle-simulated-logging.ts): Starts or stops simulated, random‑leveled logging for the invoking session. Respects the client’s selected minimum logging level.\n- `toggle-subscriber-updates` (tools/toggle-subscriber-updates.ts): Starts or stops simulated resource update notifications for URIs the invoking session has subscribed to.\n- `trigger-sampling-request` (tools/trigger-sampling-request.ts): Issues a `sampling/createMessage` request to the client/LLM using provided `prompt` and optional generation controls; returns the LLM's response payload.\n- `simulate-research-query` (tools/simulate-research-query.ts): Demonstrates MCP Tasks (SEP-1686) with a simulated multi-stage research operation. Accepts `topic` and `ambiguous` parameters. Returns a task that progresses through stages with status updates. If `ambiguous` is true and client supports elicitation, sends an elicitation request directly to gather clarification before completing.\n- `trigger-sampling-request-async` (tools/trigger-sampling-request-async.ts): Demonstrates bidirectional tasks where the server sends a sampling request that the client executes as a background task. Server polls for status and retrieves the LLM result when complete. 
Requires client to support `tasks.requests.sampling.createMessage`.\n- `trigger-elicitation-request-async` (tools/trigger-elicitation-request-async.ts): Demonstrates bidirectional tasks where the server sends an elicitation request that the client executes as a background task. Server polls while waiting for user input. Requires client to support `tasks.requests.elicitation.create`.\n\n## Prompts\n\n- `simple-prompt` (prompts/simple.ts): No-argument prompt that returns a static user message.\n- `args-prompt` (prompts/args.ts): Two-argument prompt with `city` (required) and `state` (optional) used to compose a question.\n- `completable-prompt` (prompts/completions.ts): Demonstrates argument auto-completions with the SDK’s `completable` helper; `department` completions drive context-aware `name` suggestions.\n- `resource-prompt` (prompts/resource.ts): Accepts `resourceType` (\"Text\" or \"Blob\") and `resourceId` (string convertible to integer) and returns messages that include an embedded dynamic resource of the selected type generated via `resources/templates.ts`.\n\n## Resources\n\n- Dynamic Text: `demo://resource/dynamic/text/{index}` (content generated on the fly)\n- Dynamic Blob: `demo://resource/dynamic/blob/{index}` (base64 payload generated on the fly)\n- Static Documents: `demo://resource/static/document/<filename>` (serves files from `src/everything/docs/` as static file-based resources)\n- Session Scoped: `demo://resource/session/<name>` (per-session resources registered dynamically; available only for the lifetime of the session)\n\n## Resource Subscriptions and Notifications\n\n- Simulated update notifications are opt‑in and off by default.\n- Clients may subscribe/unsubscribe to resource URIs using the MCP `resources/subscribe` and `resources/unsubscribe` requests.\n- Use the `toggle-subscriber-updates` tool to start/stop a per‑session interval that emits `notifications/resources/updated { uri }` only for URIs that session has subscribed to.\n- 
Multiple concurrent clients are supported; each client’s subscriptions are tracked per session and notifications are delivered independently via the server instance associated with that session.\n\n## Simulated Logging\n\n- Simulated logging is available but off by default.\n- Use the `toggle-simulated-logging` tool to start/stop periodic log messages of varying levels (debug, info, notice, warning, error, critical, alert, emergency) per session.\n- Clients can control the minimum level they receive via the standard MCP `logging/setLevel` request.\n\n## Tasks (SEP-1686)\n\nThe server advertises support for MCP Tasks, enabling long-running operations with status tracking:\n\n- **Capabilities advertised**: `tasks.list`, `tasks.cancel`, `tasks.requests.tools.call`\n- **Task Store**: Uses `InMemoryTaskStore` from SDK experimental for task lifecycle management\n- **Message Queue**: Uses `InMemoryTaskMessageQueue` for task-related messaging\n\n### Task Lifecycle\n\n1. Client calls `tools/call` with `task: true` parameter\n2. Server returns `CreateTaskResult` with `taskId` instead of immediate result\n3. Client polls `tasks/get` to check status and receive `statusMessage` updates\n4. When status is `completed`, client calls `tasks/result` to retrieve the final result\n\n### Task Statuses\n\n- `working`: Task is actively processing\n- `input_required`: Task needs additional input (server sends elicitation request directly)\n- `completed`: Task finished successfully\n- `failed`: Task encountered an error\n- `cancelled`: Task was cancelled by client\n\n### Demo Tools\n\n**Server-side tasks (client calls server):**\nUse the `simulate-research-query` tool to exercise the full task lifecycle. 
Set `ambiguous: true` to trigger elicitation - the server will send an `elicitation/create` request directly and await the response before completing.\n\n**Client-side tasks (server calls client):**\nUse `trigger-sampling-request-async` or `trigger-elicitation-request-async` to demonstrate bidirectional tasks where the server sends requests that the client executes as background tasks. These require the client to advertise `tasks.requests.sampling.createMessage` or `tasks.requests.elicitation.create` capabilities respectively.\n\n### Bidirectional Task Flow\n\nMCP Tasks are bidirectional - both server and client can be task executors:\n\n| Direction        | Request Type             | Task Executor | Demo Tool                           |\n| ---------------- | ------------------------ | ------------- | ----------------------------------- |\n| Client -> Server | `tools/call`             | Server        | `simulate-research-query`           |\n| Server -> Client | `sampling/createMessage` | Client        | `trigger-sampling-request-async`    |\n| Server -> Client | `elicitation/create`     | Client        | `trigger-elicitation-request-async` |\n\nFor client-side tasks:\n\n1. Server sends request with task metadata (e.g., `params.task.ttl`)\n2. Client creates task and returns `CreateTaskResult` with `taskId`\n3. Server polls `tasks/get` for status updates\n4. When complete, server calls `tasks/result` to retrieve the result\n"
  },
  {
    "path": "src/everything/docs/how-it-works.md",
    "content": "# Everything Server - How It Works\n\n**[Architecture](architecture.md)\n| [Project Structure](structure.md)\n| [Startup Process](startup.md)\n| [Server Features](features.md)\n| [Extension Points](extension.md)\n| How It Works**\n\n## Conditional Tool Registration\n\n### Module: `server/index.ts`\n\n- Some tools require client support for the capability they demonstrate. These are:\n  - `get-roots-list`\n  - `trigger-elicitation-request`\n  - `trigger-sampling-request`\n- Client capabilities aren't known until after the initialization handshake is complete.\n- Most tools are registered immediately during the Server Factory execution, prior to client connection.\n- To defer registration of these commands until client capabilities are known, a `registerConditionalTools(server)` function is invoked from an `oninitialized` handler.\n\n## Resource Subscriptions\n\n### Module: `resources/subscriptions.ts`\n\n- Tracks subscribers per URI: `Map<uri, Set<sessionId>>`.\n- Installs handlers via `setSubscriptionHandlers(server)` to process subscribe/unsubscribe requests and keep the map updated.\n- Updates are started/stopped on demand by the `toggle-subscriber-updates` tool, which calls `beginSimulatedResourceUpdates(server, sessionId)` and `stopSimulatedResourceUpdates(sessionId)`.\n- `cleanup(sessionId?)` calls `stopSimulatedResourceUpdates(sessionId)` to clear intervals and remove session‑scoped state.\n\n## Session‑scoped Resources\n\n### Module: `resources/session.ts`\n\n- `getSessionResourceURI(name: string)`: Builds a session resource URI: `demo://resource/session/<name>`.\n- `registerSessionResource(server, resource, type, payload)`: Registers a resource with the given `uri`, `name`, and `mimeType`, returning a `resource_link`. The content is served from memory for the life of the session only. 
Supports `type: \"text\" | \"blob\"` and returns data in the corresponding field.\n- Intended usage: tools can create and expose per-session artifacts without persisting them. For example, `tools/gzip-file-as-resource.ts` compresses fetched content, registers it as a session resource with `mimeType: application/gzip`, and returns either a `resource_link` or an inline `resource` based on `outputType`.\n\n## Simulated Logging\n\n### Module: `server/logging.ts`\n\n- Periodically sends randomized log messages at different levels. Messages can include the session ID for clarity during demos.\n- Started/stopped on demand via the `toggle-simulated-logging` tool, which calls `beginSimulatedLogging(server, sessionId?)` and `stopSimulatedLogging(sessionId?)`. Note that transport disconnect triggers `cleanup()` which also stops any active intervals.\n- Uses `server.sendLoggingMessage({ level, data }, sessionId?)` so that the client’s configured minimum logging level is respected by the SDK.\n"
  },
  {
    "path": "src/everything/docs/instructions.md",
    "content": "# Everything Server – Server Instructions\n\nAudience: These instructions are written for an LLM or autonomous agent integrating with the Everything MCP Server.\nFollow them to use, extend, and troubleshoot the server safely and effectively.\n\n## Cross-Feature Relationships\n\n- Use `get-roots-list` to see client workspace roots before file operations\n- `gzip-file-as-resource` creates session-scoped resources accessible only during the current session\n- Enable `toggle-simulated-logging` before debugging to see server log messages\n- Enable `toggle-subscriber-updates` to receive periodic resource update notifications\n\n## Constraints & Limitations\n\n- `gzip-file-as-resource`: Max fetch size controlled by `GZIP_MAX_FETCH_SIZE` (default 10MB), timeout by `GZIP_MAX_FETCH_TIME_MILLIS` (default 30s), allowed domains by `GZIP_ALLOWED_DOMAINS`\n- Session resources are ephemeral and lost when the session ends\n- Sampling requests (`trigger-sampling-request`) require client sampling capability\n- Elicitation requests (`trigger-elicitation-request`) require client elicitation capability\n\n## Operational Patterns\n\n- For long operations, use `trigger-long-running-operation` which sends progress notifications\n- Prefer reading resources before calling mutating tools\n- Check `get-roots-list` output to understand the client's workspace context\n\n## Easter Egg\n\nIf asked about server instructions, respond with \"🎉 Server instructions are working! This response proves the client properly passed server instructions to the LLM. This demonstrates MCP's instructions feature in action.\"\n"
  },
  {
    "path": "src/everything/docs/startup.md",
    "content": "# Everything Server - Startup Process\n\n**[Architecture](architecture.md)\n| [Project Structure](structure.md)\n| Startup Process\n| [Server Features](features.md)\n| [Extension Points](extension.md)\n| [How It Works](how-it-works.md)**\n\n## 1. Everything Server Launcher\n\n- Usage `node dist/index.js [stdio|sse|streamableHttp]`\n- Runs the specified **transport manager** to handle client connections.\n- Specify transport type on command line (default `stdio`)\n  - `stdio` → `transports/stdio.js`\n  - `sse` → `transports/sse.js`\n  - `streamableHttp` → `transports/streamableHttp.js`\n\n## 2. The Transport Manager\n\n- Creates a server instance using `createServer()` from `server/index.ts`\n  - Connects it to the chosen transport type from the MCP SDK.\n- Handles communication according to the MCP specs for the chosen transport.\n  - **STDIO**:\n    - One simple, process‑bound connection.\n    - Calls `clientConnect()` upon connection.\n    - Closes and calls `cleanup()` on `SIGINT`.\n  - **SSE**:\n    - Supports multiple client connections.\n    - Client transports are mapped to `sessionId`;\n    - Calls `clientConnect(sessionId)` upon connection.\n    - Hooks server’s `onclose` to clean and remove session.\n    - Exposes\n      - `/sse` **GET** (SSE stream)\n      - `/message` **POST** (JSON‑RPC messages)\n  - **Streamable HTTP**:\n    - Supports multiple client connections.\n    - Client transports are mapped to `sessionId`;\n    - Calls `clientConnect(sessionId)` upon connection.\n    - Exposes `/mcp` for\n      - **POST** (JSON‑RPC messages)\n      - **GET** (SSE stream)\n      - **DELETE** (termination)\n    - Uses an event store for resumability and stores transports by `sessionId`.\n    - Calls `cleanup(sessionId)` on **DELETE**.\n\n## 3. 
The Server Factory\n\n- Invoke `createServer()` from `server/index.ts`\n- Creates a new `McpServer` instance with\n  - **Capabilities**:\n    - `tools: {}`\n    - `logging: {}`\n    - `prompts: {}`\n    - `resources: { subscribe: true }`\n  - **Server Instructions**\n    - Loaded from the docs folder (`instructions.md`).\n  - **Registrations**\n    - Registers **tools** via `registerTools(server)`.\n    - Registers **resources** via `registerResources(server)`.\n    - Registers **prompts** via `registerPrompts(server)`.\n  - **Other Request Handlers**\n    - Sets up resource subscription handlers via `setSubscriptionHandlers(server)`.\n    - Roots list change handler is added post-connection via the `clientConnect(sessionId)` callback.\n  - **Returns**\n    - The `McpServer` instance\n    - A `clientConnect(sessionId)` callback that enables post-connection setup\n    - A `cleanup(sessionId?)` callback that stops any active intervals and removes any session‑scoped state\n\n## Enabling Multiple Clients\n\nSome of the transport managers defined in the `transports` folder can support multiple clients.\nIn order to do so, they must map certain data to a session identifier.\n"
  },
  {
    "path": "src/everything/docs/structure.md",
    "content": "# Everything Server - Project Structure\n\n**[Architecture](architecture.md)\n| Project Structure\n| [Startup Process](startup.md)\n| [Server Features](features.md)\n| [Extension Points](extension.md)\n| [How It Works](how-it-works.md)**\n\n```\nsrc/everything\n     ├── index.ts\n     ├── AGENTS.md\n     ├── package.json\n     ├── docs\n     │   ├── architecture.md\n     │   ├── extension.md\n     │   ├── features.md\n     │   ├── how-it-works.md\n     │   ├── instructions.md\n     │   ├── startup.md\n     │   └── structure.md\n     ├── prompts\n     │   ├── index.ts\n     │   ├── args.ts\n     │   ├── completions.ts\n     │   ├── simple.ts\n     │   └── resource.ts\n     ├── resources\n     │   ├── index.ts\n     │   ├── files.ts\n     │   ├── session.ts\n     │   ├── subscriptions.ts\n     │   └── templates.ts\n     ├── server\n     │   ├── index.ts\n     │   ├── logging.ts\n     │   └── roots.ts\n     ├── tools\n     │   ├── index.ts\n     │   ├── echo.ts\n     │   ├── get-annotated-message.ts\n     │   ├── get-env.ts\n     │   ├── get-resource-links.ts\n     │   ├── get-resource-reference.ts\n     │   ├── get-roots-list.ts\n     │   ├── get-structured-content.ts\n     │   ├── get-sum.ts\n     │   ├── get-tiny-image.ts\n     │   ├── gzip-file-as-resource.ts\n     │   ├── toggle-simulated-logging.ts\n     │   ├── toggle-subscriber-updates.ts\n     │   ├── trigger-elicitation-request.ts\n     │   ├── trigger-long-running-operation.ts\n     │   └── trigger-sampling-request.ts\n     └── transports\n         ├── sse.ts\n         ├── stdio.ts\n         └── streamableHttp.ts\n```\n\n# Project Contents\n\n## `src/everything`:\n\n### `index.ts`\n\n- CLI entry point that selects and runs a specific transport module based on the first CLI argument: `stdio`, `sse`, or `streamableHttp`.\n\n### `AGENTS.md`\n\n- Directions for Agents/LLMs explaining coding guidelines and how to appropriately extend the server.\n\n### `package.json`\n\n- Package metadata and 
scripts:\n  - `build`: TypeScript compile to `dist/`, copies `docs/` into `dist/` and marks the compiled entry scripts as executable.\n  - `start:stdio`, `start:sse`, `start:streamableHttp`: Run built transports from `dist/`.\n- Declares dependencies on `@modelcontextprotocol/sdk`, `express`, `cors`, `zod`, etc.\n\n### `docs/`\n\n- `architecture.md`\n  - High-level overview of the server architecture.\n- `instructions.md`\n  - Human‑readable instructions intended to be passed to the client/LLM for guidance on server use. Loaded by the server at startup and returned in the \"initialize\" exchange.\n\n### `prompts/`\n\n- `index.ts`\n  - `registerPrompts(server)` orchestrator; delegates to prompt factory/registration methods from individual prompt files.\n- `simple.ts`\n  - Registers `simple-prompt`: a prompt with no arguments that returns a single user message.\n- `args.ts`\n  - Registers `args-prompt`: a prompt with two arguments (`city` required, `state` optional) used to compose a message.\n- `completions.ts`\n  - Registers `completable-prompt`: a prompt whose arguments support server-driven completions using the SDK’s `completable(...)` helper (e.g., completing `department` and context-aware `name`).\n- `resource.ts`\n  - Exposes `registerEmbeddedResourcePrompt(server)` which registers `resource-prompt` — a prompt that accepts `resourceType` (\"Text\" or \"Blob\") and `resourceId` (integer), and embeds a dynamically generated resource of the requested type within the returned messages. 
Internally reuses helpers from `resources/templates.ts`.\n\n### `resources/`\n\n- `index.ts`\n  - `registerResources(server)` orchestrator; delegates to resource factory/registration methods from individual resource files.\n- `templates.ts`\n  - Registers two dynamic, template‑driven resources using `ResourceTemplate`:\n    - Text: `demo://resource/dynamic/text/{index}` (MIME: `text/plain`)\n    - Blob: `demo://resource/dynamic/blob/{index}` (MIME: `application/octet-stream`, Base64 payload)\n  - The `{index}` path variable must be a finite positive integer. Content is generated on demand with a timestamp.\n  - Exposes helpers `textResource(uri, index)`, `textResourceUri(index)`, `blobResource(uri, index)`, and `blobResourceUri(index)` so other modules can construct and embed dynamic resources directly (e.g., from prompts).\n- `files.ts`\n  - Registers static file-based resources for each file in the `docs/` folder.\n  - URIs follow the pattern: `demo://resource/static/document/<filename>`.\n  - Serves markdown files as `text/markdown`, `.txt` as `text/plain`, `.json` as `application/json`, others default to `text/plain`.\n\n### `server/`\n\n- `index.ts`\n  - Server factory that creates an `McpServer` with declared capabilities, loads server instructions, and registers tools, prompts, and resources.\n  - Sets resource subscription handlers via `setSubscriptionHandlers(server)`.\n  - Exposes `{ server, cleanup }` to the chosen transport. Cleanup stops any running intervals in the server when the transport disconnects.\n- `logging.ts`\n  - Implements simulated logging. Periodically sends randomized log messages at various levels to the connected client session. 
Started/stopped on demand via a dedicated tool.\n\n### `tools/`\n\n- `index.ts`\n  - `registerTools(server)` orchestrator; delegates to tool factory/registration methods in individual tool files.\n- `echo.ts`\n  - Registers an `echo` tool that takes a message and returns `Echo: {message}`.\n- `get-annotated-message.ts`\n  - Registers a `get-annotated-message` tool which demonstrates annotated content items by emitting a primary `text` message with `annotations` that vary by `messageType` (`\"error\" | \"success\" | \"debug\"`), and optionally includes an annotated `image` (tiny PNG) when `includeImage` is true.\n- `get-env.ts`\n  - Registers a `get-env` tool that returns the current process environment variables as formatted JSON text; useful for debugging configuration.\n- `get-resource-links.ts`\n  - Registers a `get-resource-links` tool that returns an intro `text` block followed by multiple `resource_link` items.\n- `get-resource-reference.ts`\n  - Registers a `get-resource-reference` tool that returns a reference for a selected dynamic resource.\n- `get-roots-list.ts`\n  - Registers a `get-roots-list` tool that returns the last list of roots sent by the client.\n- `gzip-file-as-resource.ts`\n  - Registers a `gzip-file-as-resource` tool that fetches content from a URL or data URI, compresses it, and then either:\n    - returns a `resource_link` to a session-scoped resource (default), or\n    - returns an inline `resource` with the gzipped data. 
The resource will still be discoverable for the duration of the session via `resources/list`.\n  - Uses `resources/session.ts` to register the gzipped blob as a per-session resource at a URI like `demo://resource/session/<name>` with `mimeType: application/gzip`.\n  - Environment controls:\n    - `GZIP_MAX_FETCH_SIZE` (bytes, default 10 MiB)\n    - `GZIP_MAX_FETCH_TIME_MILLIS` (ms, default 30000)\n    - `GZIP_ALLOWED_DOMAINS` (comma-separated allowlist; empty means all domains allowed)\n- `trigger-elicitation-request.ts`\n  - Registers a `trigger-elicitation-request` tool that sends an `elicitation/create` request to the client/LLM and returns the elicitation result.\n- `trigger-sampling-request.ts`\n  - Registers a `trigger-sampling-request` tool that sends a `sampling/createMessage` request to the client/LLM and returns the sampling result.\n- `get-structured-content.ts`\n  - Registers a `get-structured-content` tool that demonstrates structuredContent block responses.\n- `get-sum.ts`\n  - Registers a `get-sum` tool with a Zod input schema that sums two numbers `a` and `b` and returns the result.\n- `get-tiny-image.ts`\n  - Registers a `get-tiny-image` tool, which returns a tiny PNG MCP logo as an `image` content item, along with surrounding descriptive `text` items.\n- `trigger-long-running-operation.ts`\n  - Registers a `trigger-long-running-operation` tool that simulates a long-running task over a specified `duration` (seconds) and number of `steps`; emits `notifications/progress` updates when the client supplies a `progressToken`.\n- `toggle-simulated-logging.ts`\n  - Registers a `toggle-simulated-logging` tool, which starts or stops simulated logging for the invoking session.\n- `toggle-subscriber-updates.ts`\n  - Registers a `toggle-subscriber-updates` tool, which starts or stops simulated resource subscription update checks for the invoking session.\n\n### `transports/`\n\n- `stdio.ts`\n  - Starts a `StdioServerTransport`, creates the server via `createServer()`, 
and connects it.\n  - Handles `SIGINT` to close cleanly and calls `cleanup()` to remove any live intervals.\n- `sse.ts`\n  - Express server exposing:\n    - `GET /sse` to establish an SSE connection per session.\n    - `POST /message` for client messages.\n  - Manages multiple connected clients via a transport map.\n  - Starts an `SSEServerTransport`, creates the server via `createServer()`, and connects it to a new transport.\n  - On server disconnect, calls `cleanup()` to remove any live intervals.\n- `streamableHttp.ts`\n  - Express server exposing a single `/mcp` endpoint for POST (JSON‑RPC), GET (SSE stream), and DELETE (session termination) using `StreamableHTTPServerTransport`.\n  - Uses an `InMemoryEventStore` for resumable sessions and tracks transports by `sessionId`.\n  - Connects a fresh server instance on initialization POST and reuses the transport for subsequent requests.\n"
  },
  {
    "path": "src/everything/index.ts",
    "content": "#!/usr/bin/env node\n\n// Parse command line arguments first\nconst args = process.argv.slice(2);\nconst scriptName = args[0] || \"stdio\";\n\nasync function run() {\n  try {\n    // Dynamically import only the requested module to prevent all modules from initializing\n    switch (scriptName) {\n      case \"stdio\":\n        // Import and run the default server\n        await import(\"./transports/stdio.js\");\n        break;\n      case \"sse\":\n        // Import and run the SSE server\n        await import(\"./transports/sse.js\");\n        break;\n      case \"streamableHttp\":\n        // Import and run the streamable HTTP server\n        await import(\"./transports/streamableHttp.js\");\n        break;\n      default:\n        console.error(`-`.repeat(53));\n        console.error(`  Everything Server Launcher`);\n        console.error(`  Usage: node ./index.js [stdio|sse|streamableHttp]`);\n        console.error(`  Default transport: stdio`);\n        console.error(`-`.repeat(53));\n        console.error(`Unknown transport: ${scriptName}`);\n        console.log(\"Available transports:\");\n        console.log(\"- stdio\");\n        console.log(\"- sse\");\n        console.log(\"- streamableHttp\");\n        process.exit(1);\n    }\n  } catch (error) {\n    console.error(\"Error running script:\", error);\n    process.exit(1);\n  }\n}\n\nawait run();\n"
  },
  {
    "path": "src/everything/package.json",
    "content": "{\n  \"name\": \"@modelcontextprotocol/server-everything\",\n  \"version\": \"2.0.0\",\n  \"description\": \"MCP server that exercises all the features of the MCP protocol\",\n  \"license\": \"SEE LICENSE IN LICENSE\",\n  \"mcpName\": \"io.github.modelcontextprotocol/server-everything\",\n  \"author\": \"Model Context Protocol a Series of LF Projects, LLC.\",\n  \"homepage\": \"https://modelcontextprotocol.io\",\n  \"bugs\": \"https://github.com/modelcontextprotocol/servers/issues\",\n  \"repository\": {\n    \"type\": \"git\",\n    \"url\": \"https://github.com/modelcontextprotocol/servers.git\"\n  },\n  \"type\": \"module\",\n  \"bin\": {\n    \"mcp-server-everything\": \"dist/index.js\"\n  },\n  \"files\": [\n    \"dist\"\n  ],\n  \"scripts\": {\n    \"build\": \"tsc && shx cp -r docs dist/ && shx chmod +x dist/*.js\",\n    \"prepare\": \"npm run build\",\n    \"watch\": \"tsc --watch\",\n    \"start:stdio\": \"node dist/index.js stdio\",\n    \"start:sse\": \"node dist/index.js sse\",\n    \"start:streamableHttp\": \"node dist/index.js streamableHttp\",\n    \"prettier:fix\": \"prettier --write .\",\n    \"prettier:check\": \"prettier --check .\",\n    \"test\": \"vitest run --coverage\"\n  },\n  \"dependencies\": {\n    \"@modelcontextprotocol/sdk\": \"^1.26.0\",\n    \"cors\": \"^2.8.5\",\n    \"express\": \"^5.2.1\",\n    \"jszip\": \"^3.10.1\",\n    \"zod\": \"^3.25.0\",\n    \"zod-to-json-schema\": \"^3.23.5\"\n  },\n  \"devDependencies\": {\n    \"@types/cors\": \"^2.8.19\",\n    \"@types/express\": \"^5.0.6\",\n    \"@vitest/coverage-v8\": \"^2.1.8\",\n    \"shx\": \"^0.3.4\",\n    \"typescript\": \"^5.6.2\",\n    \"prettier\": \"^2.8.8\",\n    \"vitest\": \"^2.1.8\"\n  }\n}\n"
  },
  {
    "path": "src/everything/prompts/args.ts",
    "content": "import { z } from \"zod\";\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\n\n/**\n * Register a prompt with arguments\n * - Two arguments, one required and one optional\n * - Combines argument values in the returned prompt\n *\n * @param server\n */\nexport const registerArgumentsPrompt = (server: McpServer) => {\n  // Prompt arguments\n  const promptArgsSchema = {\n    city: z.string().describe(\"Name of the city\"),\n    state: z.string().describe(\"Name of the state\").optional(),\n  };\n\n  // Register the prompt\n  server.registerPrompt(\n    \"args-prompt\",\n    {\n      title: \"Arguments Prompt\",\n      description: \"A prompt with two arguments, one required and one optional\",\n      argsSchema: promptArgsSchema,\n    },\n    (args) => {\n      const location = `${args?.city}${args?.state ? `, ${args?.state}` : \"\"}`;\n      return {\n        messages: [\n          {\n            role: \"user\",\n            content: {\n              type: \"text\",\n              text: `What's weather in ${location}?`,\n            },\n          },\n        ],\n      };\n    }\n  );\n};\n"
  },
  {
    "path": "src/everything/prompts/completions.ts",
    "content": "import { z } from \"zod\";\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { completable } from \"@modelcontextprotocol/sdk/server/completable.js\";\n\n/**\n * Register a prompt with completable arguments\n * - Two required arguments, both with completion handlers\n * - First argument value will be included in context for second argument\n * - Allows second argument to depend on the first argument value\n *\n * @param server\n */\nexport const registerPromptWithCompletions = (server: McpServer) => {\n  // Prompt arguments\n  const promptArgsSchema = {\n    department: completable(\n      z.string().describe(\"Choose the department.\"),\n      (value) => {\n        return [\"Engineering\", \"Sales\", \"Marketing\", \"Support\"].filter((d) =>\n          d.startsWith(value)\n        );\n      }\n    ),\n    name: completable(\n      z\n        .string()\n        .describe(\"Choose a team member to lead the selected department.\"),\n      (value, context) => {\n        const department = context?.arguments?.[\"department\"];\n        if (department === \"Engineering\") {\n          return [\"Alice\", \"Bob\", \"Charlie\"].filter((n) => n.startsWith(value));\n        } else if (department === \"Sales\") {\n          return [\"David\", \"Eve\", \"Frank\"].filter((n) => n.startsWith(value));\n        } else if (department === \"Marketing\") {\n          return [\"Grace\", \"Henry\", \"Iris\"].filter((n) => n.startsWith(value));\n        } else if (department === \"Support\") {\n          return [\"John\", \"Kim\", \"Lee\"].filter((n) => n.startsWith(value));\n        }\n        return [];\n      }\n    ),\n  };\n\n  // Register the prompt\n  server.registerPrompt(\n    \"completable-prompt\",\n    {\n      title: \"Team Management\",\n      description: \"First argument choice narrows values for second argument.\",\n      argsSchema: promptArgsSchema,\n    },\n    ({ department, name }) => ({\n      messages: [\n        {\n  
        role: \"user\",\n          content: {\n            type: \"text\",\n            text: `Please promote ${name} to the head of the ${department} team.`,\n          },\n        },\n      ],\n    })\n  );\n};\n"
  },
  {
    "path": "src/everything/prompts/index.ts",
    "content": "import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { registerSimplePrompt } from \"./simple.js\";\nimport { registerArgumentsPrompt } from \"./args.js\";\nimport { registerPromptWithCompletions } from \"./completions.js\";\nimport { registerEmbeddedResourcePrompt } from \"./resource.js\";\n\n/**\n * Register the prompts with the MCP server.\n *\n * @param server\n */\nexport const registerPrompts = (server: McpServer) => {\n  registerSimplePrompt(server);\n  registerArgumentsPrompt(server);\n  registerPromptWithCompletions(server);\n  registerEmbeddedResourcePrompt(server);\n};\n"
  },
  {
    "path": "src/everything/prompts/resource.ts",
    "content": "import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport {\n  resourceTypeCompleter,\n  resourceIdForPromptCompleter,\n} from \"../resources/templates.js\";\nimport {\n  textResource,\n  textResourceUri,\n  blobResourceUri,\n  blobResource,\n  RESOURCE_TYPE_BLOB,\n  RESOURCE_TYPE_TEXT,\n  RESOURCE_TYPES,\n} from \"../resources/templates.js\";\n\n/**\n * Register a prompt with an embedded resource reference\n * - Takes a resource type and id\n * - Returns the corresponding dynamically created resource\n *\n * @param server\n */\nexport const registerEmbeddedResourcePrompt = (server: McpServer) => {\n  // Prompt arguments\n  const promptArgsSchema = {\n    resourceType: resourceTypeCompleter,\n    resourceId: resourceIdForPromptCompleter,\n  };\n\n  // Register the prompt\n  server.registerPrompt(\n    \"resource-prompt\",\n    {\n      title: \"Resource Prompt\",\n      description: \"A prompt that includes an embedded resource reference\",\n      argsSchema: promptArgsSchema,\n    },\n    (args) => {\n      // Validate resource type argument\n      const resourceType = args.resourceType;\n      if (\n        !RESOURCE_TYPES.includes(\n          resourceType as typeof RESOURCE_TYPE_TEXT | typeof RESOURCE_TYPE_BLOB\n        )\n      ) {\n        throw new Error(\n          `Invalid resourceType: ${args?.resourceType}. Must be ${RESOURCE_TYPE_TEXT} or ${RESOURCE_TYPE_BLOB}.`\n        );\n      }\n\n      // Validate resourceId argument\n      const resourceId = Number(args?.resourceId);\n      if (\n        !Number.isFinite(resourceId) ||\n        !Number.isInteger(resourceId) ||\n        resourceId < 1\n      ) {\n        throw new Error(\n          `Invalid resourceId: ${args?.resourceId}. Must be a finite positive integer.`\n        );\n      }\n\n      // Get resource based on the resource type\n      const uri =\n        resourceType === RESOURCE_TYPE_TEXT\n          ? 
textResourceUri(resourceId)\n          : blobResourceUri(resourceId);\n      const resource =\n        resourceType === RESOURCE_TYPE_TEXT\n          ? textResource(uri, resourceId)\n          : blobResource(uri, resourceId);\n\n      return {\n        messages: [\n          {\n            role: \"user\",\n            content: {\n              type: \"text\",\n              text: `This prompt includes the ${resourceType} resource with id: ${resourceId}. Please analyze the following resource:`,\n            },\n          },\n          {\n            role: \"user\",\n            content: {\n              type: \"resource\",\n              resource: resource,\n            },\n          },\n        ],\n      };\n    }\n  );\n};\n"
  },
  {
    "path": "src/everything/prompts/simple.ts",
    "content": "import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\n\n/**\n * Register a simple prompt with no arguments\n * - Returns the fixed text of the prompt with no modifications\n *\n * @param server\n */\nexport const registerSimplePrompt = (server: McpServer) => {\n  // Register the prompt\n  server.registerPrompt(\n    \"simple-prompt\",\n    {\n      title: \"Simple Prompt\",\n      description: \"A prompt with no arguments\",\n    },\n    () => ({\n      messages: [\n        {\n          role: \"user\",\n          content: {\n            type: \"text\",\n            text: \"This is a simple prompt without arguments.\",\n          },\n        },\n      ],\n    })\n  );\n};\n"
  },
  {
    "path": "src/everything/resources/files.ts",
    "content": "import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { dirname, join } from \"path\";\nimport { fileURLToPath } from \"url\";\nimport { readdirSync, readFileSync, statSync } from \"fs\";\n\n/**\n * Register static file resources\n * - Each file in src/everything/docs is exposed as an individual static resource\n * - URIs follow the pattern: \"demo://static/docs/<filename>\"\n * - Markdown (.md) files are served as mime type \"text/markdown\"\n * - Text (.txt) files are served as mime type \"text/plain\"\n * - JSON (.json) files are served as mime type \"application/json\"\n *\n * @param server\n */\nexport const registerFileResources = (server: McpServer) => {\n  // Read the entries in the docs directory\n  const __filename = fileURLToPath(import.meta.url);\n  const __dirname = dirname(__filename);\n  const docsDir = join(__dirname, \"..\", \"docs\");\n  let entries: string[] = [];\n  try {\n    entries = readdirSync(docsDir);\n  } catch (e) {\n    // If docs/ folder is missing or unreadable, just skip registration\n    return;\n  }\n\n  // Register each file as a static resource\n  for (const name of entries) {\n    // Only process files, not directories\n    const fullPath = join(docsDir, name);\n    try {\n      const st = statSync(fullPath);\n      if (!st.isFile()) continue;\n    } catch {\n      continue;\n    }\n\n    // Prepare file resource info\n    const uri = `demo://resource/static/document/${encodeURIComponent(name)}`;\n    const mimeType = getMimeType(name);\n    const description = `Static document file exposed from /docs: ${name}`;\n\n    // Register file resource\n    server.registerResource(\n      name,\n      uri,\n      { mimeType, description },\n      async (uri) => {\n        const text = readFileSafe(fullPath);\n        return {\n          contents: [\n            {\n              uri: uri.toString(),\n              mimeType,\n              text,\n            },\n          ],\n        };\n      }\n 
   );\n  }\n};\n\n/**\n * Get the mimetype based on filename\n * @param fileName\n */\nfunction getMimeType(fileName: string): string {\n  const lower = fileName.toLowerCase();\n  if (lower.endsWith(\".md\") || lower.endsWith(\".markdown\"))\n    return \"text/markdown\";\n  if (lower.endsWith(\".txt\")) return \"text/plain\";\n  if (lower.endsWith(\".json\")) return \"application/json\";\n  return \"text/plain\";\n}\n\n/**\n * Read a file or return an error message if it fails\n * @param path\n */\nfunction readFileSafe(path: string): string {\n  try {\n    return readFileSync(path, \"utf-8\");\n  } catch (e) {\n    return `Error reading file: ${path}. ${e}`;\n  }\n}\n"
  },
  {
    "path": "src/everything/resources/index.ts",
    "content": "import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { registerResourceTemplates } from \"./templates.js\";\nimport { registerFileResources } from \"./files.js\";\nimport { fileURLToPath } from \"url\";\nimport { dirname, join } from \"path\";\nimport { readFileSync } from \"fs\";\n\n/**\n * Register the resources with the MCP server.\n * @param server\n */\nexport const registerResources = (server: McpServer) => {\n  registerResourceTemplates(server);\n  registerFileResources(server);\n};\n\n/**\n * Reads the server instructions from the corresponding markdown file.\n * Attempts to load the content of the file located in the `docs` directory.\n * If the file cannot be loaded, an error message is returned instead.\n *\n * @return {string} The content of the server instructions file, or an error message if reading fails.\n */\nexport function readInstructions(): string {\n  const __filename = fileURLToPath(import.meta.url);\n  const __dirname = dirname(__filename);\n  const filePath = join(__dirname, \"..\", \"docs\", \"instructions.md\");\n  let instructions;\n\n  try {\n    instructions = readFileSync(filePath, \"utf-8\");\n  } catch (e) {\n    instructions = \"Server instructions not loaded: \" + e;\n  }\n  return instructions;\n}\n"
  },
  {
    "path": "src/everything/resources/session.ts",
    "content": "import { McpServer, RegisteredResource } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { Resource, ResourceLink } from \"@modelcontextprotocol/sdk/types.js\";\n\n/**\n * Tracks registered session resources by URI to allow updating/removing on re-registration.\n * This prevents \"Resource already registered\" errors when a tool creates a resource\n * with the same URI multiple times during a session.\n */\nconst registeredResources = new Map<string, RegisteredResource>();\n\n/**\n * Generates a session-scoped resource URI string based on the provided resource name.\n *\n * @param {string} name - The name of the resource to create a URI for.\n * @returns {string} The formatted session resource URI.\n */\nexport const getSessionResourceURI = (name: string): string => {\n  return `demo://resource/session/${name}`;\n};\n\n/**\n * Registers a session-scoped resource with the provided server and returns a resource link.\n *\n * The registered resource is available during the life of the session only; it is not otherwise persisted.\n *\n * @param {McpServer} server - The server instance responsible for handling the resource registration.\n * @param {Resource} resource - The resource object containing metadata such as URI, name, description, and mimeType.\n * @param {\"text\"|\"blob\"} type\n * @param payload\n * @returns {ResourceLink} An object representing the resource link, with associated metadata.\n */\nexport const registerSessionResource = (\n  server: McpServer,\n  resource: Resource,\n  type: \"text\" | \"blob\",\n  payload: string\n): ResourceLink => {\n  // Destructure resource\n  const { uri, name, mimeType, description, title, annotations, icons, _meta } =\n    resource;\n\n  // Prepare the resource content to return\n  // See https://modelcontextprotocol.io/specification/2025-11-25/server/resources#resource-contents\n  const resourceContent =\n    type === \"text\"\n      ? 
{\n          uri: uri.toString(),\n          mimeType,\n          text: payload,\n        }\n      : {\n          uri: uri.toString(),\n          mimeType,\n          blob: payload,\n        };\n\n  // Check if a resource with this URI is already registered and remove it\n  const existingResource = registeredResources.get(uri);\n  if (existingResource) {\n    existingResource.remove();\n    registeredResources.delete(uri);\n  }\n\n  // Register file resource\n  const registeredResource = server.registerResource(\n    name,\n    uri,\n    { mimeType, description, title, annotations, icons, _meta },\n    async () => {\n      return {\n        contents: [resourceContent],\n      };\n    }\n  );\n\n  // Track the registered resource for potential future removal\n  registeredResources.set(uri, registeredResource);\n\n  return { type: \"resource_link\", ...resource };\n};\n"
  },
  {
    "path": "src/everything/resources/subscriptions.ts",
    "content": "import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport {\n  SubscribeRequestSchema,\n  UnsubscribeRequestSchema,\n} from \"@modelcontextprotocol/sdk/types.js\";\n\n// Track subscriber session id lists by URI\nconst subscriptions: Map<string, Set<string | undefined>> = new Map<\n  string,\n  Set<string | undefined>\n>();\n\n// Interval to send notifications to subscribers\nconst subsUpdateIntervals: Map<string | undefined, NodeJS.Timeout | undefined> =\n  new Map<string | undefined, NodeJS.Timeout | undefined>();\n\n/**\n * Sets up the subscription and unsubscription handlers for the provided server.\n *\n * The function defines two request handlers:\n * 1. A `Subscribe` handler that allows clients to subscribe to specific resource URIs.\n * 2. An `Unsubscribe` handler that allows clients to unsubscribe from specific resource URIs.\n *\n * The `Subscribe` handler performs the following actions:\n * - Extracts the URI and session ID from the request.\n * - Logs a message acknowledging the subscription request.\n * - Updates the internal tracking of subscribers for the given URI.\n *\n * The `Unsubscribe` handler performs the following actions:\n * - Extracts the URI and session ID from the request.\n * - Logs a message acknowledging the unsubscription request.\n * - Removes the subscriber for the specified URI.\n *\n * @param {McpServer} server - The server instance to which subscription handlers will be attached.\n */\nexport const setSubscriptionHandlers = (server: McpServer) => {\n  // Set the subscription handler\n  server.server.setRequestHandler(\n    SubscribeRequestSchema,\n    async (request, extra) => {\n      // Get the URI to subscribe to\n      const { uri } = request.params;\n\n      // Get the session id (can be undefined for stdio)\n      const sessionId = extra.sessionId as string;\n\n      // Acknowledge the subscribe request\n      await server.sendLoggingMessage(\n        {\n          level: \"info\",\n      
    data: `Received Subscribe Resource request for URI: ${uri} ${\n            sessionId ? `from session ${sessionId}` : \"\"\n          }`,\n        },\n        sessionId\n      );\n\n      // Get the subscribers for this URI\n      const subscribers = subscriptions.has(uri)\n        ? (subscriptions.get(uri) as Set<string>)\n        : new Set<string>();\n      subscribers.add(sessionId);\n      subscriptions.set(uri, subscribers);\n      return {};\n    }\n  );\n\n  // Set the unsubscription handler\n  server.server.setRequestHandler(\n    UnsubscribeRequestSchema,\n    async (request, extra) => {\n      // Get the URI to subscribe to\n      const { uri } = request.params;\n\n      // Get the session id (can be undefined for stdio)\n      const sessionId = extra.sessionId as string;\n\n      // Acknowledge the subscribe request\n      await server.sendLoggingMessage(\n        {\n          level: \"info\",\n          data: `Received Unsubscribe Resource request: ${uri} ${\n            sessionId ? `from session ${sessionId}` : \"\"\n          }`,\n        },\n        sessionId\n      );\n\n      // Remove the subscriber\n      if (subscriptions.has(uri)) {\n        const subscribers = subscriptions.get(uri) as Set<string>;\n        if (subscribers.has(sessionId)) subscribers.delete(sessionId);\n      }\n      return {};\n    }\n  );\n};\n\n/**\n * Sends simulated resource update notifications to the subscribed client.\n *\n * This function iterates through all resource URIs stored in the subscriptions\n * and checks if the specified session ID is subscribed to them. If so, it sends\n * a notification through the provided server. 
If the session ID is no longer valid\n * (disconnected), it removes the session ID from the list of subscribers.\n *\n * @param {McpServer} server - The server instance used to send notifications.\n * @param {string | undefined} sessionId - The session ID of the client to check for subscriptions.\n * @returns {Promise<void>} Resolves once all applicable notifications are sent.\n */\nconst sendSimulatedResourceUpdates = async (\n  server: McpServer,\n  sessionId: string | undefined\n): Promise<void> => {\n  // Search all URIs for ones this client is subscribed to\n  for (const uri of subscriptions.keys()) {\n    const subscribers = subscriptions.get(uri) as Set<string | undefined>;\n\n    // If this client is subscribed, send the notification\n    if (subscribers.has(sessionId)) {\n      await server.server.notification({\n        method: \"notifications/resources/updated\",\n        params: { uri },\n      });\n    } else {\n      subscribers.delete(sessionId); // subscriber has disconnected\n    }\n  }\n};\n\n/**\n * Starts the process of simulating resource updates and sending server notifications\n * to the client for the resources they are subscribed to. 
If the update interval is\n * already active, invoking this function will not start another interval.\n *\n * @param server\n * @param sessionId\n */\nexport const beginSimulatedResourceUpdates = (\n  server: McpServer,\n  sessionId: string | undefined\n) => {\n  if (!subsUpdateIntervals.has(sessionId)) {\n    // Send once immediately\n    sendSimulatedResourceUpdates(server, sessionId);\n\n    // Set the interval to send later resource update notifications to this client\n    subsUpdateIntervals.set(\n      sessionId,\n      setInterval(() => sendSimulatedResourceUpdates(server, sessionId), 5000)\n    );\n  }\n};\n\n/**\n * Stops simulated resource updates for a given session.\n *\n * This function halts any active intervals associated with the provided session ID\n * and removes the session's corresponding entries from resource management collections.\n * Session ID can be undefined for stdio.\n *\n * @param {string} [sessionId]\n */\nexport const stopSimulatedResourceUpdates = (sessionId?: string) => {\n  // Remove active intervals\n  if (subsUpdateIntervals.has(sessionId)) {\n    const subsUpdateInterval = subsUpdateIntervals.get(sessionId);\n    clearInterval(subsUpdateInterval);\n    subsUpdateIntervals.delete(sessionId);\n  }\n};\n"
  },
  {
    "path": "src/everything/resources/templates.ts",
    "content": "import { z } from \"zod\";\nimport {\n  CompleteResourceTemplateCallback,\n  McpServer,\n  ResourceTemplate,\n} from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { completable } from \"@modelcontextprotocol/sdk/server/completable.js\";\n\n// Resource types\nexport const RESOURCE_TYPE_TEXT = \"Text\" as const;\nexport const RESOURCE_TYPE_BLOB = \"Blob\" as const;\nexport const RESOURCE_TYPES: string[] = [\n  RESOURCE_TYPE_TEXT,\n  RESOURCE_TYPE_BLOB,\n];\n\n/**\n * A completer function for resource types.\n *\n * This variable provides functionality to perform autocompletion for the resource types based on user input.\n * It uses a schema description to validate the input and filters through a predefined list of resource types\n * to return suggestions that start with the given input.\n *\n * The input value is expected to be a string representing the type of resource to fetch.\n * The completion logic matches the input against available resource types.\n */\nexport const resourceTypeCompleter = completable(\n  z.string().describe(\"Type of resource to fetch\"),\n  (value: string) => {\n    return RESOURCE_TYPES.filter((t) => t.startsWith(value));\n  }\n);\n\n/**\n * A completer function for resource IDs as strings.\n *\n * The `resourceIdCompleter` accepts a string input representing the ID of a text resource\n * and validates whether the provided value corresponds to an integer resource ID.\n *\n * NOTE: Currently, prompt arguments can only be strings since type is not field of `PromptArgument`\n * Consequently, we must define it as a string and convert the argument to number before using it\n * https://modelcontextprotocol.io/specification/2025-11-25/schema#promptargument\n *\n * If the value is a valid integer, it returns the value within an array.\n * Otherwise, it returns an empty array.\n *\n * The input string is first transformed into a number and checked to ensure it is an integer.\n * This helps validate and suggest appropriate 
resource IDs.\n */\nexport const resourceIdForPromptCompleter = completable(\n  z.string().describe(\"ID of the text resource to fetch\"),\n  (value: string) => {\n    const resourceId = Number(value);\n    return Number.isInteger(resourceId) && resourceId > 0 ? [value] : [];\n  }\n);\n\n/**\n * A callback function that acts as a completer for resource ID values, validating and returning\n * the input value as part of a resource template.\n *\n * @typedef {CompleteResourceTemplateCallback}\n * @param {string} value - The input string value to be evaluated as a resource ID.\n * @returns {string[]} Returns an array containing the input value if it represents a positive\n * integer resource ID, otherwise returns an empty array.\n */\nexport const resourceIdForResourceTemplateCompleter: CompleteResourceTemplateCallback =\n  (value: string) => {\n    const resourceId = Number(value);\n\n    return Number.isInteger(resourceId) && resourceId > 0 ? [value] : [];\n  };\n\nconst uriBase: string = \"demo://resource/dynamic\";\nconst textUriBase: string = `${uriBase}/text`;\nconst blobUriBase: string = `${uriBase}/blob`;\nconst textUriTemplate: string = `${textUriBase}/{resourceId}`;\nconst blobUriTemplate: string = `${blobUriBase}/{resourceId}`;\n\n/**\n * Create a dynamic text resource\n * - Exposed for use by embedded resource prompt example\n * @param uri\n * @param resourceId\n */\nexport const textResource = (uri: URL, resourceId: number) => {\n  const timestamp = new Date().toLocaleTimeString();\n  return {\n    uri: uri.toString(),\n    mimeType: \"text/plain\",\n    text: `Resource ${resourceId}: This is a plaintext resource created at ${timestamp}`,\n  };\n};\n\n/**\n * Create a dynamic blob resource\n * - Exposed for use by embedded resource prompt example\n * @param uri\n * @param resourceId\n */\nexport const blobResource = (uri: URL, resourceId: number) => {\n  const timestamp = new Date().toLocaleTimeString();\n  const resourceText = Buffer.from(\n    `Resource 
${resourceId}: This is a base64 blob created at ${timestamp}`\n  ).toString(\"base64\");\n  return {\n    uri: uri.toString(),\n    mimeType: \"text/plain\",\n    blob: resourceText,\n  };\n};\n\n/**\n * Create a dynamic text resource URI\n * - Exposed for use by embedded resource prompt example\n * @param resourceId\n */\nexport const textResourceUri = (resourceId: number) =>\n  new URL(`${textUriBase}/${resourceId}`);\n\n/**\n * Create a dynamic blob resource URI\n * - Exposed for use by embedded resource prompt example\n * @param resourceId\n */\nexport const blobResourceUri = (resourceId: number) =>\n  new URL(`${blobUriBase}/${resourceId}`);\n\n/**\n * Parses the resource identifier from the provided URI and validates it\n * against the given variables. Throws an error if the URI corresponds\n * to an unknown resource or if the resource identifier is invalid.\n *\n * @param {URL} uri - The URI of the resource to be parsed.\n * @param {Record<string, unknown>} variables - A record containing context-specific variables that include the resourceId.\n * @returns {number} The parsed and validated resource identifier as an integer.\n * @throws {Error} Throws an error if the URI does not match a supported base URI or if the resourceId is invalid.\n */\nconst parseResourceId = (uri: URL, variables: Record<string, unknown>) => {\n  const uriError = `Unknown resource: ${uri.toString()}`;\n  if (\n    !uri.toString().startsWith(textUriBase) &&\n    !uri.toString().startsWith(blobUriBase)\n  ) {\n    throw new Error(uriError);\n  } else {\n    const idxStr = String((variables as any).resourceId ?? 
\"\");\n    const idx = Number(idxStr);\n    if (Number.isFinite(idx) && Number.isInteger(idx) && idx > 0) {\n      return idx;\n    } else {\n      throw new Error(uriError);\n    }\n  }\n};\n\n/**\n * Register resource templates with the MCP server.\n * - Text and blob resources, dynamically generated from the URI {resourceId} variable\n * - Any finite positive integer is acceptable for the resourceId variable\n * - List resources method will not return these resources\n * - These are only accessible via template URIs\n * - Both blob and text resources:\n *   - have content that is dynamically generated, including a timestamp\n *   - have different template URIs\n *     - Blob: \"demo://resource/dynamic/blob/{resourceId}\"\n *     - Text: \"demo://resource/dynamic/text/{resourceId}\"\n *\n * @param server\n */\nexport const registerResourceTemplates = (server: McpServer) => {\n  // Register the text resource template\n  server.registerResource(\n    \"Dynamic Text Resource\",\n    new ResourceTemplate(textUriTemplate, {\n      list: undefined,\n      complete: { resourceId: resourceIdForResourceTemplateCompleter },\n    }),\n    {\n      mimeType: \"text/plain\",\n      description:\n        \"Plaintext dynamic resource fabricated from the {resourceId} variable, which must be an integer.\",\n    },\n    async (uri, variables) => {\n      const resourceId = parseResourceId(uri, variables);\n      return {\n        contents: [textResource(uri, resourceId)],\n      };\n    }\n  );\n\n  // Register the blob resource template\n  server.registerResource(\n    \"Dynamic Blob Resource\",\n    new ResourceTemplate(blobUriTemplate, {\n      list: undefined,\n      complete: { resourceId: resourceIdForResourceTemplateCompleter },\n    }),\n    {\n      mimeType: \"application/octet-stream\",\n      description:\n        \"Binary (base64) dynamic resource fabricated from the {resourceId} variable, which must be an integer.\",\n    },\n    async (uri, variables) => {\n      
const resourceId = parseResourceId(uri, variables);\n      return {\n        contents: [blobResource(uri, resourceId)],\n      };\n    }\n  );\n};\n"
  },
  {
    "path": "src/everything/server/index.ts",
    "content": "import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport {\n  InMemoryTaskStore,\n  InMemoryTaskMessageQueue,\n} from \"@modelcontextprotocol/sdk/experimental/tasks\";\nimport {\n  setSubscriptionHandlers,\n  stopSimulatedResourceUpdates,\n} from \"../resources/subscriptions.js\";\nimport { registerConditionalTools, registerTools } from \"../tools/index.js\";\nimport { registerResources, readInstructions } from \"../resources/index.js\";\nimport { registerPrompts } from \"../prompts/index.js\";\nimport { stopSimulatedLogging } from \"./logging.js\";\nimport { syncRoots } from \"./roots.js\";\n\n// Server Factory response\nexport type ServerFactoryResponse = {\n  server: McpServer;\n  cleanup: (sessionId?: string) => void;\n};\n\n/**\n * Server Factory\n *\n * This function initializes a `McpServer` with specific capabilities and instructions,\n * registers tools, resources, and prompts, and configures resource subscription handlers.\n *\n * @returns {ServerFactoryResponse} An object containing the server instance, and a `cleanup`\n * function for handling server-side cleanup when a session ends.\n *\n * Properties of the returned object:\n * - `server` {Object}: The initialized server instance.\n * - `cleanup` {Function}: Function to perform cleanup operations for a closing session.\n */\nexport const createServer: () => ServerFactoryResponse = () => {\n  // Read the server instructions\n  const instructions = readInstructions();\n\n  // Create task store and message queue for task support\n  const taskStore = new InMemoryTaskStore();\n  const taskMessageQueue = new InMemoryTaskMessageQueue();\n\n  let initializeTimeout: NodeJS.Timeout | null = null;\n\n  // Create the server\n  const server = new McpServer(\n    {\n      name: \"mcp-servers/everything\",\n      title: \"Everything Reference Server\",\n      version: \"2.0.0\",\n    },\n    {\n      capabilities: {\n        tools: {\n          listChanged: true,\n        },\n   
     prompts: {\n          listChanged: true,\n        },\n        resources: {\n          subscribe: true,\n          listChanged: true,\n        },\n        logging: {},\n        tasks: {\n          list: {},\n          cancel: {},\n          requests: {\n            tools: {\n              call: {},\n            },\n          },\n        },\n      },\n      instructions,\n      taskStore,\n      taskMessageQueue,\n    }\n  );\n\n  // Register the tools\n  registerTools(server);\n\n  // Register the resources\n  registerResources(server);\n\n  // Register the prompts\n  registerPrompts(server);\n\n  // Set resource subscription handlers\n  setSubscriptionHandlers(server);\n\n  // Perform post-initialization operations\n  server.server.oninitialized = async () => {\n    // Register conditional tools now that client capabilities are known.\n    // This finishes before the `notifications/initialized` handler finishes.\n    registerConditionalTools(server);\n\n    // Sync roots if the client supports them.\n    // This is delayed until after the `notifications/initialized` handler finishes,\n    // otherwise, the request gets lost.\n    const sessionId = server.server.transport?.sessionId;\n    initializeTimeout = setTimeout(() => syncRoots(server, sessionId), 350);\n  };\n\n  // Return the ServerFactoryResponse\n  return {\n    server,\n    cleanup: (sessionId?: string) => {\n      // Stop any simulated logging or resource updates that may have been initiated.\n      stopSimulatedLogging(sessionId);\n      stopSimulatedResourceUpdates(sessionId);\n      // Clean up task store timers\n      taskStore.cleanup();\n      if (initializeTimeout) clearTimeout(initializeTimeout);\n    },\n  } satisfies ServerFactoryResponse;\n};\n"
  },
  {
    "path": "src/everything/server/logging.ts",
    "content": "import { LoggingLevel } from \"@modelcontextprotocol/sdk/types.js\";\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\n\n// Map session ID to the interval for sending logging messages to the client\nconst logsUpdateIntervals: Map<string | undefined, NodeJS.Timeout | undefined> =\n  new Map<string | undefined, NodeJS.Timeout | undefined>();\n\n/**\n * Initiates a simulated logging process by sending random log messages to the client at a\n * fixed interval. Each log message contains a random logging level and optional session ID.\n *\n * @param {McpServer} server - The server instance responsible for handling the logging messages.\n * @param {string | undefined} sessionId - An optional identifier for the session. If provided,\n * the session ID will be appended to log messages.\n */\nexport const beginSimulatedLogging = (\n  server: McpServer,\n  sessionId: string | undefined\n) => {\n  const maybeAppendSessionId = sessionId ? ` - SessionId ${sessionId}` : \"\";\n  const messages: { level: LoggingLevel; data: string }[] = [\n    { level: \"debug\", data: `Debug-level message${maybeAppendSessionId}` },\n    { level: \"info\", data: `Info-level message${maybeAppendSessionId}` },\n    { level: \"notice\", data: `Notice-level message${maybeAppendSessionId}` },\n    {\n      level: \"warning\",\n      data: `Warning-level message${maybeAppendSessionId}`,\n    },\n    { level: \"error\", data: `Error-level message${maybeAppendSessionId}` },\n    {\n      level: \"critical\",\n      data: `Critical-level message${maybeAppendSessionId}`,\n    },\n    { level: \"alert\", data: `Alert level-message${maybeAppendSessionId}` },\n    {\n      level: \"emergency\",\n      data: `Emergency-level message${maybeAppendSessionId}`,\n    },\n  ];\n\n  /**\n   * Send a simulated logging message to the client\n   */\n  const sendSimulatedLoggingMessage = async (sessionId: string | undefined) => {\n    // By using the `sendLoggingMessage` function to 
send the message, we\n    // ensure that the client's chosen logging level will be respected\n    await server.sendLoggingMessage(\n      messages[Math.floor(Math.random() * messages.length)],\n      sessionId\n    );\n  };\n\n  // Set the interval to send later logging messages to this client\n  if (!logsUpdateIntervals.has(sessionId)) {\n    // Send once immediately\n    sendSimulatedLoggingMessage(sessionId);\n\n    // Send a randomly-leveled log message every 5 seconds\n    logsUpdateIntervals.set(\n      sessionId,\n      setInterval(() => sendSimulatedLoggingMessage(sessionId), 5000)\n    );\n  }\n};\n\n/**\n * Stops the simulated logging process for a given session.\n *\n * This function halts the periodic logging updates associated with the specified\n * session ID by clearing the interval and removing the session's tracking\n * reference. Session ID can be undefined for stdio.\n *\n * @param {string} [sessionId] - The optional unique identifier of the session.\n */\nexport const stopSimulatedLogging = (sessionId?: string) => {\n  // Remove active intervals\n  if (logsUpdateIntervals.has(sessionId)) {\n    const logsUpdateInterval = logsUpdateIntervals.get(sessionId);\n    clearInterval(logsUpdateInterval);\n    logsUpdateIntervals.delete(sessionId);\n  }\n};\n"
  },
  {
    "path": "src/everything/server/roots.ts",
    "content": "import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport {\n  Root,\n  RootsListChangedNotificationSchema,\n} from \"@modelcontextprotocol/sdk/types.js\";\n\n// Track roots by session id\nexport const roots: Map<string | undefined, Root[]> = new Map<\n  string | undefined,\n  Root[]\n>();\n\n/**\n * Get the latest the client roots list for the session.\n *\n * - Request and cache the roots list for the session if it has not been fetched before.\n * - Return the cached roots list for the session if it exists.\n *\n * When requesting the roots list for a session, it also sets up a `roots/list_changed`\n * notification handler. This ensures that updates are automatically fetched and handled\n * in real-time.\n *\n * This function is idempotent. It should only request roots from the client once per session,\n * returning the cached version thereafter.\n *\n * @param {McpServer} server - An instance of the MCP server used to communicate with the client.\n * @param {string} [sessionId] - An optional session id used to associate the roots list with a specific client session.\n *\n * @throws {Error} In case of a failure to request the roots from the client, an error log message is sent.\n */\nexport const syncRoots = async (server: McpServer, sessionId?: string) => {\n  const clientCapabilities = server.server.getClientCapabilities() || {};\n  const clientSupportsRoots: boolean = clientCapabilities?.roots !== undefined;\n\n  // Fetch the roots list for this client\n  if (clientSupportsRoots) {\n    // Function to request the updated roots list from the client\n    const requestRoots = async () => {\n      try {\n        // Request the updated roots list from the client\n        const response = await server.server.listRoots();\n        if (response && \"roots\" in response) {\n          // Store the roots list for this client\n          roots.set(sessionId, response.roots);\n\n          // Notify the client of roots received\n          
await server.sendLoggingMessage(\n            {\n              level: \"info\",\n              logger: \"everything-server\",\n              data: `Roots updated: ${response?.roots?.length} root(s) received from client`,\n            },\n            sessionId\n          );\n        } else {\n          await server.sendLoggingMessage(\n            {\n              level: \"info\",\n              logger: \"everything-server\",\n              data: \"Client returned no roots set\",\n            },\n            sessionId\n          );\n        }\n      } catch (error) {\n        console.error(\n          `Failed to request roots from client ${sessionId}: ${\n            error instanceof Error ? error.message : String(error)\n          }`\n        );\n      }\n    };\n\n    // If the roots have not been synced for this client,\n    // set notification handler and request initial roots\n    if (!roots.has(sessionId)) {\n      // Set the list changed notification handler\n      server.server.setNotificationHandler(\n        RootsListChangedNotificationSchema,\n        requestRoots\n      );\n\n      // Request the initial roots list immediately\n      await requestRoots();\n    }\n\n    // Return the roots list for this client\n    return roots.get(sessionId);\n  }\n};\n"
  },
  {
    "path": "src/everything/tools/echo.ts",
    "content": "import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { CallToolResult } from \"@modelcontextprotocol/sdk/types.js\";\nimport { z } from \"zod\";\n\n// Tool input schema\nexport const EchoSchema = z.object({\n  message: z.string().describe(\"Message to echo\"),\n});\n\n// Tool configuration\nconst name = \"echo\";\nconst config = {\n  title: \"Echo Tool\",\n  description: \"Echoes back the input string\",\n  inputSchema: EchoSchema,\n};\n\n/**\n * Registers the 'echo' tool.\n *\n * The registered tool validates input arguments using the EchoSchema and\n * returns a response that echoes the message provided in the arguments.\n *\n * @param {McpServer} server - The McpServer instance where the tool will be registered.\n * @returns {void}\n */\nexport const registerEchoTool = (server: McpServer) => {\n  server.registerTool(name, config, async (args): Promise<CallToolResult> => {\n    const validatedArgs = EchoSchema.parse(args);\n    return {\n      content: [{ type: \"text\", text: `Echo: ${validatedArgs.message}` }],\n    };\n  });\n};\n"
  },
  {
    "path": "src/everything/tools/get-annotated-message.ts",
    "content": "import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { CallToolResult } from \"@modelcontextprotocol/sdk/types.js\";\nimport { z } from \"zod\";\nimport { MCP_TINY_IMAGE } from \"./get-tiny-image.js\";\n\n// Tool input schema\nconst GetAnnotatedMessageSchema = z.object({\n  messageType: z\n    .enum([\"error\", \"success\", \"debug\"])\n    .describe(\"Type of message to demonstrate different annotation patterns\"),\n  includeImage: z\n    .boolean()\n    .default(false)\n    .describe(\"Whether to include an example image\"),\n});\n\n// Tool configuration\nconst name = \"get-annotated-message\";\nconst config = {\n  title: \"Get Annotated Message Tool\",\n  description:\n    \"Demonstrates how annotations can be used to provide metadata about content.\",\n  inputSchema: GetAnnotatedMessageSchema,\n};\n\n/**\n * Registers the 'get-annotated-message' tool.\n *\n * The registered tool generates and sends messages with specific types, such as error,\n * success, or debug, carrying associated annotations like priority level and intended\n * audience.\n *\n * The response will have annotations and optionally contain an annotated image.\n *\n * @function\n * @param {McpServer} server - The McpServer instance where the tool will be registered.\n */\nexport const registerGetAnnotatedMessageTool = (server: McpServer) => {\n  server.registerTool(name, config, async (args): Promise<CallToolResult> => {\n    const { messageType, includeImage } = GetAnnotatedMessageSchema.parse(args);\n\n    const content: CallToolResult[\"content\"] = [];\n\n    // Main message with different priorities/audiences based on type\n    if (messageType === \"error\") {\n      content.push({\n        type: \"text\",\n        text: \"Error: Operation failed\",\n        annotations: {\n          priority: 1.0, // Errors are highest priority\n          audience: [\"user\", \"assistant\"], // Both need to know about errors\n        },\n      });\n    } else if 
(messageType === \"success\") {\n      content.push({\n        type: \"text\",\n        text: \"Operation completed successfully\",\n        annotations: {\n          priority: 0.7, // Success messages are important but not critical\n          audience: [\"user\"], // Success mainly for user consumption\n        },\n      });\n    } else if (messageType === \"debug\") {\n      content.push({\n        type: \"text\",\n        text: \"Debug: Cache hit ratio 0.95, latency 150ms\",\n        annotations: {\n          priority: 0.3, // Debug info is low priority\n          audience: [\"assistant\"], // Technical details for assistant\n        },\n      });\n    }\n\n    // Optional image with its own annotations\n    if (includeImage) {\n      content.push({\n        type: \"image\",\n        data: MCP_TINY_IMAGE,\n        mimeType: \"image/png\",\n        annotations: {\n          priority: 0.5,\n          audience: [\"user\"], // Images primarily for user visualization\n        },\n      });\n    }\n\n    return { content };\n  });\n};\n"
  },
  {
    "path": "src/everything/tools/get-env.ts",
    "content": "import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { CallToolResult } from \"@modelcontextprotocol/sdk/types.js\";\n\n// Tool configuration\nconst name = \"get-env\";\nconst config = {\n  title: \"Print Environment Tool\",\n  description:\n    \"Returns all environment variables, helpful for debugging MCP server configuration\",\n  inputSchema: {},\n};\n\n/**\n * Registers the 'get-env' tool.\n *\n * The registered tool Retrieves and returns the environment variables\n * of the current process as a JSON-formatted string encapsulated in a text response.\n *\n * @param {McpServer} server - The McpServer instance where the tool will be registered.\n * @returns {void}\n */\nexport const registerGetEnvTool = (server: McpServer) => {\n  server.registerTool(name, config, async (args): Promise<CallToolResult> => {\n    return {\n      content: [\n        {\n          type: \"text\",\n          text: JSON.stringify(process.env, null, 2),\n        },\n      ],\n    };\n  });\n};\n"
  },
  {
    "path": "src/everything/tools/get-resource-links.ts",
    "content": "import { z } from \"zod\";\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { CallToolResult } from \"@modelcontextprotocol/sdk/types.js\";\nimport {\n  textResource,\n  textResourceUri,\n  blobResourceUri,\n  blobResource,\n} from \"../resources/templates.js\";\n\n// Tool input schema\nconst GetResourceLinksSchema = z.object({\n  count: z\n    .number()\n    .min(1)\n    .max(10)\n    .default(3)\n    .describe(\"Number of resource links to return (1-10)\"),\n});\n\n// Tool configuration\nconst name = \"get-resource-links\";\nconst config = {\n  title: \"Get Resource Links Tool\",\n  description:\n    \"Returns up to ten resource links that reference different types of resources\",\n  inputSchema: GetResourceLinksSchema,\n};\n\n/**\n * Registers the 'get-resource-reference' tool.\n *\n * The registered tool retrieves a specified number of resource links and their metadata.\n * Resource links are dynamically generated as either text or binary blob resources,\n * based on their ID being even or odd.\n\n * The response contains a \"text\" introductory block and multiple \"resource_link\" blocks.\n *\n * @param {McpServer} server - The McpServer instance where the tool will be registered.\n */\nexport const registerGetResourceLinksTool = (server: McpServer) => {\n  server.registerTool(name, config, async (args): Promise<CallToolResult> => {\n    const { count } = GetResourceLinksSchema.parse(args);\n\n    // Add intro text content block\n    const content: CallToolResult[\"content\"] = [];\n    content.push({\n      type: \"text\",\n      text: `Here are ${count} resource links to resources available in this server:`,\n    });\n\n    // Create resource link content blocks\n    for (let resourceId = 1; resourceId <= count; resourceId++) {\n      // Get resource uri for text or blob resource based on odd/even resourceId\n      const isOdd = resourceId % 2 === 0;\n      const uri = isOdd\n        ? 
textResourceUri(resourceId)\n        : blobResourceUri(resourceId);\n\n      // Get resource based on the resource type\n      const resource = isOdd\n        ? textResource(uri, resourceId)\n        : blobResource(uri, resourceId);\n\n      content.push({\n        type: \"resource_link\",\n        uri: resource.uri,\n        name: `${isOdd ? \"Text\" : \"Blob\"} Resource ${resourceId}`,\n        description: `Resource ${resourceId}: ${\n          resource.mimeType === \"text/plain\"\n            ? \"plaintext resource\"\n            : \"binary blob resource\"\n        }`,\n        mimeType: resource.mimeType,\n      });\n    }\n\n    return { content };\n  });\n};\n"
  },
  {
    "path": "src/everything/tools/get-resource-reference.ts",
    "content": "import { z } from \"zod\";\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { CallToolResult } from \"@modelcontextprotocol/sdk/types.js\";\nimport {\n  textResource,\n  textResourceUri,\n  blobResourceUri,\n  blobResource,\n  RESOURCE_TYPE_BLOB,\n  RESOURCE_TYPE_TEXT,\n  RESOURCE_TYPES,\n} from \"../resources/templates.js\";\n\n// Tool input schema\nconst GetResourceReferenceSchema = z.object({\n  resourceType: z\n    .enum([RESOURCE_TYPE_TEXT, RESOURCE_TYPE_BLOB])\n    .default(RESOURCE_TYPE_TEXT),\n  resourceId: z\n    .number()\n    .default(1)\n    .describe(\"ID of the text resource to fetch\"),\n});\n\n// Tool configuration\nconst name = \"get-resource-reference\";\nconst config = {\n  title: \"Get Resource Reference Tool\",\n  description: \"Returns a resource reference that can be used by MCP clients\",\n  inputSchema: GetResourceReferenceSchema,\n};\n\n/**\n * Registers the 'get-resource-reference' tool.\n *\n * The registered tool validates and processes arguments for retrieving a resource\n * reference. Supported resource types include predefined `RESOURCE_TYPE_TEXT` and\n * `RESOURCE_TYPE_BLOB`. The retrieved resource's reference will include the resource\n * ID, type, and its associated URI.\n *\n * The tool performs the following operations:\n * 1. Validates the `resourceType` argument to ensure it matches a supported type.\n * 2. Validates the `resourceId` argument to ensure it is a finite positive integer.\n * 3. Constructs a URI for the resource based on its type (text or blob).\n * 4. 
Retrieves the resource and returns it in a content block.\n *\n * @param {McpServer} server - The McpServer instance where the tool will be registered.\n */\nexport const registerGetResourceReferenceTool = (server: McpServer) => {\n  server.registerTool(name, config, async (args): Promise<CallToolResult> => {\n    // Validate resource type argument\n    const { resourceType } = args;\n    if (!RESOURCE_TYPES.includes(resourceType)) {\n      throw new Error(\n        `Invalid resourceType: ${args?.resourceType}. Must be ${RESOURCE_TYPE_TEXT} or ${RESOURCE_TYPE_BLOB}.`\n      );\n    }\n\n    // Validate resourceId argument\n    const resourceId = Number(args?.resourceId);\n    if (\n      !Number.isFinite(resourceId) ||\n      !Number.isInteger(resourceId) ||\n      resourceId < 1\n    ) {\n      throw new Error(\n        `Invalid resourceId: ${args?.resourceId}. Must be a finite positive integer.`\n      );\n    }\n\n    // Get resource based on the resource type\n    const uri =\n      resourceType === RESOURCE_TYPE_TEXT\n        ? textResourceUri(resourceId)\n        : blobResourceUri(resourceId);\n    const resource =\n      resourceType === RESOURCE_TYPE_TEXT\n        ? textResource(uri, resourceId)\n        : blobResource(uri, resourceId);\n\n    return {\n      content: [\n        {\n          type: \"text\",\n          text: `Returning resource reference for Resource ${resourceId}:`,\n        },\n        {\n          type: \"resource\",\n          resource: resource,\n        },\n        {\n          type: \"text\",\n          text: `You can access this resource using the URI: ${resource.uri}`,\n        },\n      ],\n    };\n  });\n};\n"
  },
  {
    "path": "src/everything/tools/get-roots-list.ts",
    "content": "import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { CallToolResult } from \"@modelcontextprotocol/sdk/types.js\";\nimport { syncRoots } from \"../server/roots.js\";\n\n// Tool configuration\nconst name = \"get-roots-list\";\nconst config = {\n  title: \"Get Roots List Tool\",\n  description:\n    \"Lists the current MCP roots provided by the client. Demonstrates the roots protocol capability even though this server doesn't access files.\",\n  inputSchema: {},\n};\n\n/**\n * Registers the 'get-roots-list' tool.\n *\n * If the client does not support the roots capability, the tool is not registered.\n *\n * The registered tool interacts with the MCP roots capability, which enables the server to access\n * information about the client's workspace directories or file system roots.\n *\n * When supported, the server automatically retrieves and formats the current list of roots from the\n * client upon connection and whenever the client sends a `roots/list_changed` notification.\n *\n * Therefore, this tool displays the roots that the server currently knows about for the connected\n * client. 
If for some reason the server never got the initial roots list, the tool will request the\n * list from the client again.\n *\n * @param {McpServer} server - The McpServer instance where the tool will be registered.\n */\nexport const registerGetRootsListTool = (server: McpServer) => {\n  // Does client support roots?\n  const clientCapabilities = server.server.getClientCapabilities() || {};\n  const clientSupportsRoots: boolean = clientCapabilities.roots !== undefined;\n\n  // If so, register tool\n  if (clientSupportsRoots) {\n    server.registerTool(\n      name,\n      config,\n      async (args, extra): Promise<CallToolResult> => {\n        // Get the current rootsFetch the current roots list from the client if need be\n        const currentRoots = await syncRoots(server, extra.sessionId);\n\n        // Respond if client supports roots but doesn't have any configured\n        if (\n          clientSupportsRoots &&\n          (!currentRoots || currentRoots.length === 0)\n        ) {\n          return {\n            content: [\n              {\n                type: \"text\",\n                text:\n                  \"The client supports roots but no roots are currently configured.\\n\\n\" +\n                  \"This could mean:\\n\" +\n                  \"1. The client hasn't provided any roots yet\\n\" +\n                  \"2. The client provided an empty roots list\\n\" +\n                  \"3. The roots configuration is still being loaded\",\n              },\n            ],\n          };\n        }\n\n        // Create formatted response if there is a list of roots\n        const rootsList = currentRoots\n          ? currentRoots\n              .map((root, index) => {\n                return `${index + 1}. 
${root.name || \"Unnamed Root\"}\\n   URI: ${\n                  root.uri\n                }`;\n              })\n              .join(\"\\n\\n\")\n          : \"No roots found\";\n\n        return {\n          content: [\n            {\n              type: \"text\",\n              text:\n                `Current MCP Roots (${\n                  currentRoots!.length\n                } total):\\n\\n${rootsList}\\n\\n` +\n                \"Note: This server demonstrates the roots protocol capability but doesn't actually access files. \" +\n                \"The roots are provided by the MCP client and can be used by servers that need file system access.\",\n            },\n          ],\n        };\n      }\n    );\n  }\n};\n"
  },
  {
    "path": "src/everything/tools/get-structured-content.ts",
    "content": "import { z } from \"zod\";\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport {\n  CallToolResult,\n  ContentBlock,\n} from \"@modelcontextprotocol/sdk/types.js\";\n\n// Tool input schema\nconst GetStructuredContentInputSchema = {\n  location: z\n    .enum([\"New York\", \"Chicago\", \"Los Angeles\"])\n    .describe(\"Choose city\"),\n};\n\n// Tool output schema\nconst GetStructuredContentOutputSchema = z.object({\n  temperature: z.number().describe(\"Temperature in celsius\"),\n  conditions: z.string().describe(\"Weather conditions description\"),\n  humidity: z.number().describe(\"Humidity percentage\"),\n});\n\n// Tool configuration\nconst name = \"get-structured-content\";\nconst config = {\n  title: \"Get Structured Content Tool\",\n  description:\n    \"Returns structured content along with an output schema for client data validation\",\n  inputSchema: GetStructuredContentInputSchema,\n  outputSchema: GetStructuredContentOutputSchema,\n};\n\n/**\n * Registers the 'get-structured-content' tool.\n *\n * The registered tool processes incoming arguments using a predefined input schema,\n * generates structured content with weather information including temperature,\n * conditions, and humidity, and returns both backward-compatible content blocks\n * and structured content in the response.\n *\n * The response contains:\n * - `content`: An array of content blocks, presented as JSON stringified objects.\n * - `structuredContent`: A JSON structured representation of the weather data.\n *\n * @param {McpServer} server - The McpServer instance where the tool will be registered.\n */\nexport const registerGetStructuredContentTool = (server: McpServer) => {\n  server.registerTool(name, config, async (args): Promise<CallToolResult> => {\n    // Get simulated weather for the chosen city\n    let weather;\n    switch (args.location) {\n      case \"New York\":\n        weather = {\n          temperature: 33,\n          
conditions: \"Cloudy\",\n          humidity: 82,\n        };\n        break;\n\n      case \"Chicago\":\n        weather = {\n          temperature: 36,\n          conditions: \"Light rain / drizzle\",\n          humidity: 82,\n        };\n        break;\n\n      case \"Los Angeles\":\n        weather = {\n          temperature: 73,\n          conditions: \"Sunny / Clear\",\n          humidity: 48,\n        };\n        break;\n    }\n\n    const backwardCompatibleContentBlock: ContentBlock = {\n      type: \"text\",\n      text: JSON.stringify(weather),\n    };\n\n    return {\n      content: [backwardCompatibleContentBlock],\n      structuredContent: weather,\n    };\n  });\n};\n"
  },
  {
    "path": "src/everything/tools/get-sum.ts",
    "content": "import { z } from \"zod\";\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { CallToolResult } from \"@modelcontextprotocol/sdk/types.js\";\n\n// Tool input schema\nconst GetSumSchema = z.object({\n  a: z.number().describe(\"First number\"),\n  b: z.number().describe(\"Second number\"),\n});\n\n// Tool configuration\nconst name = \"get-sum\";\nconst config = {\n  title: \"Get Sum Tool\",\n  description: \"Returns the sum of two numbers\",\n  inputSchema: GetSumSchema,\n};\n\n/**\n * Registers the 'get-sum' tool.\n **\n * The registered tool processes input arguments, validates them using a predefined schema,\n * calculates the sum of two numeric values, and returns the result in a content block.\n *\n * Expects input arguments to conform to a specific schema that includes two numeric properties, `a` and `b`.\n * Validation is performed to ensure the input adheres to the expected structure before calculating the sum.\n *\n * The result is returned as a Promise resolving to an object containing the computed sum in a text format.\n *\n * @param {McpServer} server - The McpServer instance where the tool will be registered.\n */\nexport const registerGetSumTool = (server: McpServer) => {\n  server.registerTool(name, config, async (args): Promise<CallToolResult> => {\n    const validatedArgs = GetSumSchema.parse(args);\n    const sum = validatedArgs.a + validatedArgs.b;\n    return {\n      content: [\n        {\n          type: \"text\",\n          text: `The sum of ${validatedArgs.a} and ${validatedArgs.b} is ${sum}.`,\n        },\n      ],\n    };\n  });\n};\n"
  },
  {
    "path": "src/everything/tools/get-tiny-image.ts",
    "content": "import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { CallToolResult } from \"@modelcontextprotocol/sdk/types.js\";\n\n// A tiny encoded MCP logo image\nexport const MCP_TINY_IMAGE =\n  \"iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAKsGlDQ1BJQ0MgUHJvZmlsZQAASImVlwdUU+kSgOfe9JDQEiIgJfQmSCeAlBBaAAXpYCMkAUKJMRBU7MriClZURLCs6KqIgo0idizYFsWC3QVZBNR1sWDDlXeBQ9jdd9575805c+a7c+efmf+e/z9nLgCdKZDJMlF1gCxpjjwyyI8dn5DIJvUABRiY0kBdIMyWcSMiwgCTUft3+dgGyJC9YzuU69/f/1fREImzhQBIBMbJomxhFsbHMe0TyuQ5ALg9mN9kbo5siK9gzJRjDWL8ZIhTR7hviJOHGY8fjomO5GGsDUCmCQTyVACaKeZn5wpTsTw0f4ztpSKJFGPsGbyzsmaLMMbqgiUWI8N4KD8n+S95Uv+WM1mZUyBIVfLIXoaF7C/JlmUK5v+fn+N/S1amYrSGOaa0NHlwJGaxvpAHGbNDlSxNnhI+yhLRcPwwpymCY0ZZmM1LHGWRwD9UuTZzStgop0gC+co8OfzoURZnB0SNsnx2pLJWipzHHWWBfKyuIiNG6U8T85X589Ki40Y5VxI7ZZSzM6JCx2J4Sr9cEansXywN8hurG6jce1b2X/Yr4SvX5qRFByv3LhjrXyzljuXMjlf2JhL7B4zFxCjjZTl+ylqyzAhlvDgzSOnPzo1Srs3BDuTY2gjlN0wXhESMMoRBELAhBjIhB+QggECQgBTEOeJ5Q2cUeLNl8+WS1LQcNhe7ZWI2Xyq0m8B2tHd0Bhi6syNH4j1r+C4irGtjvhWVAF4nBgcHT475Qm4BHEkCoNaO+SxnAKh3A1w5JVTIc0d8Q9cJCEAFNWCCDhiACViCLTiCK3iCLwRACIRDNCTATBBCGmRhnc+FhbAMCqAI1sNmKIOdsBv2wyE4CvVwCs7DZbgOt+AePIZ26IJX0AcfYQBBEBJCRxiIDmKImCE2iCPCQbyRACQMiUQSkCQkFZEiCmQhsgIpQoqRMmQXUokcQU4g55GrSCvyEOlAepF3yFcUh9JQJqqPmqMTUQ7KRUPRaHQGmorOQfPQfHQtWopWoAfROvQ8eh29h7ajr9B+HOBUcCycEc4Wx8HxcOG4RFwKTo5bjCvEleAqcNW4Rlwz7g6uHfca9wVPxDPwbLwt3hMfjI/BC/Fz8Ivxq/Fl+P34OvxF/B18B74P/51AJ+gRbAgeBD4hnpBKmEsoIJQQ9hJqCZcI9whdhI9EIpFFtCC6EYOJCcR04gLiauJ2Yg3xHLGV2EnsJ5FIOiQbkhcpnCQg5ZAKSFtJB0lnSbdJXaTPZBWyIdmRHEhOJEvJy8kl5APkM+Tb5G7yAEWdYkbxoIRTRJT5lHWUPZRGyk1KF2WAqkG1oHpRo6np1GXUUmo19RL1CfW9ioqKsYq7ylQVicpSlVKVwypXVDpUvtA0adY0Hm06TUFbS9tHO0d7SHtPp9PN6b70RHoOfS29kn6B/oz+WZWhaqfKVxWpLlEtV61Tva36Ro2iZqbGVZuplqdWonZM7abaa3WKurk6T12gvli9XP2E+n31fg2GhoNGuEaWxmqNAxpXNXo0SZrmmgGaIs18zd2aFzQ7GTiGCYPHEDJWMPYwLjG6mESmBZPPTGcWMQ8xW5h9WppazlqxWvO0yrVOa7WzcCxzFp+VyVrHOspqY30dpz+OO048btW46nG3x33SHq/tqy3WLtSu0b6n/VWHrROgk6GzQade56kuXtdad6ruXN0dupd0X49njvccLxxfOP7o+Ed6q
J61XqTeAr3dejf0+vUN9IP0Zfpb9S/ovzZgGfgapBtsMjhj0GvIMPQ2lBhuMjxr+JKtxeayM9ml7IvsPiM9o2AjhdEuoxajAWML4xjj5cY1xk9NqCYckxSTTSZNJn2mhqaTTReaVpk+MqOYcczSzLaYNZt9MrcwjzNfaV5v3mOhbcG3yLOosnhiSbf0sZxjWWF514poxbHKsNpudcsatXaxTrMut75pg9q42khsttu0TiBMcJ8gnVAx4b4tzZZrm2tbZdthx7ILs1tuV2/3ZqLpxMSJGyY2T/xu72Kfab/H/rGDpkOIw3KHRod3jtaOQsdyx7tOdKdApyVODU5vnW2cxc47nB+4MFwmu6x0aXL509XNVe5a7drrZuqW5LbN7T6HyYngrOZccSe4+7kvcT/l/sXD1SPH46jHH562nhmeBzx7JllMEk/aM6nTy9hL4LXLq92b7Z3k/ZN3u4+Rj8Cnwue5r4mvyHevbzfXipvOPch942fvJ/er9fvE8+At4p3zx/kH+Rf6twRoBsQElAU8CzQOTA2sCuwLcglaEHQumBAcGrwh+D5fny/kV/L7QtxCFoVcDKWFRoWWhT4Psw6ThzVORieHTN44+ckUsynSKfXhEM4P3xj+NMIiYk7EyanEqRFTy6e+iHSIXBjZHMWImhV1IOpjtF/0uujHMZYxipimWLXY6bGVsZ/i/OOK49rjJ8Yvir+eoJsgSWhIJCXGJu5N7J8WMG3ztK7pLtMLprfNsJgxb8bVmbozM2eenqU2SzDrWBIhKS7pQNI3QbigQtCfzE/eltwn5Am3CF+JfEWbRL1iL3GxuDvFK6U4pSfVK3Vjam+aT1pJ2msJT1ImeZsenL4z/VNGeMa+jMHMuMyaLHJWUtYJqaY0Q3pxtsHsebNbZTayAln7HI85m+f0yUPle7OR7BnZDTlMbDi6obBU/KDoyPXOLc/9PDd27rF5GvOk827Mt56/an53XmDezwvwC4QLmhYaLVy2sGMRd9Guxcji5MVNS0yW5C/pWhq0dP8y6rKMZb8st19evPzDirgVjfn6+UvzO38I+qGqQLVAXnB/pefKnT/if5T82LLKadXWVd8LRYXXiuyLSoq+rRauvrbGYU3pmsG1KWtb1rmu27GeuF66vm2Dz4b9xRrFecWdGydvrNvE3lS46cPmWZuvljiX7NxC3aLY0l4aVtqw1XTr+q3fytLK7pX7ldds09u2atun7aLtt3f47qjeqb+zaOfXnyQ/PdgVtKuuwryiZDdxd+7uF3ti9zT/zPm5cq/u3qK9f+6T7mvfH7n/YqVbZeUBvQPrqtAqRVXvwekHbx3yP9RQbVu9q4ZVU3QYDisOvzySdKTtaOjRpmOcY9XHzY5vq2XUFtYhdfPr+urT6tsbEhpaT4ScaGr0bKw9aXdy3ymjU+WntU6vO0M9k39m8Gze2f5zsnOvz6ee72ya1fT4QvyFuxenXmy5FHrpyuXAyxeauc1nr3hdOXXV4+qJa5xr9dddr9fdcLlR+4vLL7Utri11N91uNtzyv9XYOqn1zG2f2+fv+N+5fJd/9/q9Kfda22LaHtyffr/9gehBz8PMh28f5T4aeLz0CeFJ4VP1pyXP9J5V/Gr1a027a/vpDv+OG8+jnj/uFHa++i37t29d+S/oL0q6Dbsrexx7TvUG9t56Oe1l1yvZq4HXBb9r/L7tjeWb43/4/nGjL76v66387eC71e913u/74PyhqT+i/9nHrI8Dnwo/63ze/4Xzpflr3NfugbnfSN9K/7T6s/F76Pcng1mDgzKBXDA8CuAwRVNSAN7tA6AnADCwGYI6bWSmHhZk5D9gmOA/8cjcPSyuANWYGRqNeOcADmNqvhRAzRdgaCyK9gXUyUmpo/Pv8Kw+JAbYv8K0HECi2x6tebQU/iEjc/xf+v6nBWXWv9l/AV0EC6JTIblRAAAAeGVYSWZNTQAqAAAACAAFARIAAwAAAAEAAQAAARoABQAAAAEAAABKARsAB
QAAAAEAAABSASgAAwAAAAEAAgAAh2kABAAAAAEAAABaAAAAAAAAAJAAAAABAAAAkAAAAAEAAqACAAQAAAABAAAAFKADAAQAAAABAAAAFAAAAAAXNii1AAAACXBIWXMAABYlAAAWJQFJUiTwAAAB82lUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNi4wLjAiPgogICA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx0aWZmOllSZXNvbHV0aW9uPjE0NDwvdGlmZjpZUmVzb2x1dGlvbj4KICAgICAgICAgPHRpZmY6T3JpZW50YXRpb24+MTwvdGlmZjpPcmllbnRhdGlvbj4KICAgICAgICAgPHRpZmY6WFJlc29sdXRpb24+MTQ0PC90aWZmOlhSZXNvbHV0aW9uPgogICAgICAgICA8dGlmZjpSZXNvbHV0aW9uVW5pdD4yPC90aWZmOlJlc29sdXRpb25Vbml0PgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KReh49gAAAjRJREFUOBGFlD2vMUEUx2clvoNCcW8hCqFAo1dKhEQpvsF9KrWEBh/ALbQ0KkInBI3SWyGPCCJEQliXgsTLefaca/bBWjvJzs6cOf/fnDkzOQJIjWm06/XKBEGgD8c6nU5VIWgBtQDPZPWtJE8O63a7LBgMMo/Hw0ql0jPjcY4RvmqXy4XMjUYDUwLtdhtmsxnYbDbI5/O0djqdFFKmsEiGZ9jP9gem0yn0ej2Yz+fg9XpfycimAD7DttstQTDKfr8Po9GIIg6Hw1Cr1RTgB+A72GAwgMPhQLBMJgNSXsFqtUI2myUo18pA6QJogefsPrLBX4QdCVatViklw+EQRFGEj88P2O12pEUGATmsXq+TaLPZ0AXgMRF2vMEqlQoJTSYTpNNpApvNZliv1/+BHDaZTAi2Wq1A3Ig0xmMej7+RcZjdbodUKkWAaDQK+GHjHPnImB88JrZIJAKFQgH2+z2BOczhcMiwRCIBgUAA+NN5BP6mj2DYff35gk6nA61WCzBn2JxO5wPM7/fLz4vD0E+OECfn8xl/0Gw2KbLxeAyLxQIsFgt8p75pDSO7h/HbpUWpewCike9WLpfB7XaDy+WCYrFI/slk8i0MnRRAUt46hPMI4vE4+Hw+ec7t9/44VgWigEeby+UgFArJWjUYOqhWG6x50rpcSfR6PVUfNOgEVRlTX0HhrZBKz4MZjUYWi8VoA+lc9H/VaRZYjBKrtXR8tlwumcFgeMWRbZpA9ORQWfVm8A/FsrLaxebd5wAAAABJRU5ErkJggg==\";\n\n// Tool configuration\nconst name = \"get-tiny-image\";\nconst config = {\n  title: \"Get Tiny Image Tool\",\n  description: \"Returns a tiny MCP logo image.\",\n  inputSchema: {},\n};\n\n/**\n * Registers the \"get-tiny-image\" tool.\n *\n * The registered tool returns a response containing a small image alongside some\n * descriptive text.\n *\n * The response structure 
includes textual content before and after the image.\n * The image is served as a PNG data type and represents the default MCP tiny image.\n *\n * @param server - The McpServer instance where the tool will be registered.\n */\nexport const registerGetTinyImageTool = (server: McpServer) => {\n  server.registerTool(name, config, async (args): Promise<CallToolResult> => {\n    return {\n      content: [\n        {\n          type: \"text\",\n          text: \"Here's the image you requested:\",\n        },\n        {\n          type: \"image\",\n          data: MCP_TINY_IMAGE,\n          mimeType: \"image/png\",\n        },\n        {\n          type: \"text\",\n          text: \"The image above is the MCP logo.\",\n        },\n      ],\n    };\n  });\n};\n"
  },
  {
    "path": "src/everything/tools/gzip-file-as-resource.ts",
    "content": "import { z } from \"zod\";\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { CallToolResult, Resource } from \"@modelcontextprotocol/sdk/types.js\";\nimport { gzipSync } from \"node:zlib\";\nimport {\n  getSessionResourceURI,\n  registerSessionResource,\n} from \"../resources/session.js\";\n\n// Maximum input file size - 10 MB default\nconst GZIP_MAX_FETCH_SIZE = Number(\n  process.env.GZIP_MAX_FETCH_SIZE ?? String(10 * 1024 * 1024)\n);\n\n// Maximum fetch time - 30 seconds default.\nconst GZIP_MAX_FETCH_TIME_MILLIS = Number(\n  process.env.GZIP_MAX_FETCH_TIME_MILLIS ?? String(30 * 1000)\n);\n\n// Comma-separated list of allowed domains. Empty means all domains are allowed.\nconst GZIP_ALLOWED_DOMAINS = (process.env.GZIP_ALLOWED_DOMAINS ?? \"\")\n  .split(\",\")\n  .map((d) => d.trim().toLowerCase())\n  .filter((d) => d.length > 0);\n\n// Tool input schema\nconst GZipFileAsResourceSchema = z.object({\n  name: z.string().describe(\"Name of the output file\").default(\"README.md.gz\"),\n  data: z\n    .string()\n    .url()\n    .describe(\"URL or data URI of the file content to compress\")\n    .default(\n      \"https://raw.githubusercontent.com/modelcontextprotocol/servers/refs/heads/main/README.md\"\n    ),\n  outputType: z\n    .enum([\"resourceLink\", \"resource\"])\n    .default(\"resourceLink\")\n    .describe(\n      \"How the resulting gzipped file should be returned. 'resourceLink' returns a link to a resource that can be read later, 'resource' returns a full resource object.\"\n    ),\n});\n\n// Tool configuration\nconst name = \"gzip-file-as-resource\";\nconst config = {\n  title: \"GZip File as Resource Tool\",\n  description:\n    \"Compresses a single file using gzip compression. 
Depending upon the selected output type, returns either the compressed data as a gzipped resource or a resource link, allowing it to be downloaded in a subsequent request during the current session.\",\n  inputSchema: GZipFileAsResourceSchema,\n};\n\n/**\n * Registers the `gzip-file-as-resource` tool.\n *\n * The registered tool compresses input data using gzip, and makes the resulting file accessible\n * as a resource for the duration of the session.\n *\n * The tool supports two output types:\n * - \"resource\": Returns the resource directly, including its URI, MIME type, and base64-encoded content.\n * - \"resourceLink\": Returns a link to access the resource later.\n *\n * If an unrecognized `outputType` is provided, the tool throws an error.\n *\n * @param {McpServer} server - The McpServer instance where the tool will be registered.\n * @throws {Error} Throws an error if an unknown output type is specified.\n */\nexport const registerGZipFileAsResourceTool = (server: McpServer) => {\n  server.registerTool(name, config, async (args): Promise<CallToolResult> => {\n    const {\n      name,\n      data: dataUri,\n      outputType,\n    } = GZipFileAsResourceSchema.parse(args);\n\n    // Validate data uri\n    const url = validateDataURI(dataUri);\n\n    // Fetch the data\n    const response = await fetchSafely(url, {\n      maxBytes: GZIP_MAX_FETCH_SIZE,\n      timeoutMillis: GZIP_MAX_FETCH_TIME_MILLIS,\n    });\n\n    // Compress the data using gzip\n    const inputBuffer = Buffer.from(response);\n    const compressedBuffer = gzipSync(inputBuffer);\n\n    // Create resource\n    const uri = getSessionResourceURI(name);\n    const blob = compressedBuffer.toString(\"base64\");\n    const mimeType = \"application/gzip\";\n    const resource = <Resource>{ uri, name, mimeType };\n\n    // Register resource, get resource link in return\n    const resourceLink = registerSessionResource(\n      server,\n      resource,\n      \"blob\",\n      blob\n    );\n\n    // 
Return the resource or a resource link that can be used to access this resource later\n    if (outputType === \"resource\") {\n      return {\n        content: [\n          {\n            type: \"resource\",\n            resource: { uri, mimeType, blob },\n          },\n        ],\n      };\n    } else if (outputType === \"resourceLink\") {\n      return {\n        content: [resourceLink],\n      };\n    } else {\n      throw new Error(`Unknown outputType: ${outputType}`);\n    }\n  });\n};\n\n/**\n * Validates a given data URI to ensure it follows the appropriate protocols and rules.\n *\n * @param {string} dataUri - The data URI to validate. Must be an HTTP, HTTPS, or data protocol URL. If a domain is provided, it must match the allowed domains list if applicable.\n * @return {URL} The validated and parsed URL object.\n * @throws {Error} If the data URI does not use a supported protocol or does not meet allowed domains criteria.\n */\nfunction validateDataURI(dataUri: string): URL {\n  // Validate Inputs\n  const url = new URL(dataUri);\n  try {\n    if (\n      url.protocol !== \"http:\" &&\n      url.protocol !== \"https:\" &&\n      url.protocol !== \"data:\"\n    ) {\n      throw new Error(\n        `Unsupported URL protocol for ${dataUri}. Only http, https, and data URLs are supported.`\n      );\n    }\n    if (\n      GZIP_ALLOWED_DOMAINS.length > 0 &&\n      (url.protocol === \"http:\" || url.protocol === \"https:\")\n    ) {\n      const domain = url.hostname;\n      const domainAllowed = GZIP_ALLOWED_DOMAINS.some((allowedDomain) => {\n        return domain === allowedDomain || domain.endsWith(`.${allowedDomain}`);\n      });\n      if (!domainAllowed) {\n        throw new Error(`Domain ${domain} is not in the allowed domains list.`);\n      }\n    }\n  } catch (error) {\n    throw new Error(\n      `Error processing file ${dataUri}: ${\n        error instanceof Error ? 
error.message : String(error)\n      }`\n    );\n  }\n  return url;\n}\n\n/**\n * Fetches data safely from a given URL while ensuring constraints on maximum byte size and timeout duration.\n *\n * @param {URL} url The URL to fetch data from.\n * @param {Object} options An object containing options for the fetch operation.\n * @param {number} options.maxBytes The maximum allowed size (in bytes) of the response. If the response exceeds this size, the operation will be aborted.\n * @param {number} options.timeoutMillis The timeout duration (in milliseconds) for the fetch operation. If the fetch takes longer, it will be aborted.\n * @return {Promise<ArrayBuffer>} A promise that resolves with the response as an ArrayBuffer if successful.\n * @throws {Error} Throws an error if the response size exceeds the defined limit, the fetch times out, or the response is otherwise invalid.\n */\nasync function fetchSafely(\n  url: URL,\n  { maxBytes, timeoutMillis }: { maxBytes: number; timeoutMillis: number }\n): Promise<ArrayBuffer> {\n  const controller = new AbortController();\n  const timeout = setTimeout(\n    () =>\n      controller.abort(\n        `Fetching ${url} took more than ${timeoutMillis} ms and was aborted.`\n      ),\n    timeoutMillis\n  );\n\n  try {\n    // Fetch the data\n    const response = await fetch(url, { signal: controller.signal });\n    if (!response.body) {\n      throw new Error(\"No response body\");\n    }\n\n    // Note: we can't trust the Content-Length header: a malicious or clumsy server could return much more data than advertised.\n    // We check it here for early bail-out, but we still need to monitor actual bytes read below.\n    const contentLengthHeader = response.headers.get(\"content-length\");\n    if (contentLengthHeader != null) {\n      const contentLength = parseInt(contentLengthHeader, 10);\n      if (contentLength > maxBytes) {\n        throw new Error(\n          `Content-Length for ${url} exceeds max of ${maxBytes}: 
${contentLength}`\n        );\n      }\n    }\n\n    // Read the fetched data from the response body\n    const reader = response.body.getReader();\n    const chunks = [];\n    let totalSize = 0;\n\n    // Read chunks until done\n    try {\n      while (true) {\n        const { done, value } = await reader.read();\n        if (done) break;\n\n        totalSize += value.length;\n\n        if (totalSize > maxBytes) {\n          reader.cancel();\n          throw new Error(`Response from ${url} exceeds ${maxBytes} bytes`);\n        }\n\n        chunks.push(value);\n      }\n    } finally {\n      reader.releaseLock();\n    }\n\n    // Combine chunks into a single buffer\n    const buffer = new Uint8Array(totalSize);\n    let offset = 0;\n    for (const chunk of chunks) {\n      buffer.set(chunk, offset);\n      offset += chunk.length;\n    }\n\n    return buffer.buffer;\n  } finally {\n    clearTimeout(timeout);\n  }\n}\n"
  },
  {
    "path": "src/everything/tools/index.ts",
    "content": "import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { registerGetAnnotatedMessageTool } from \"./get-annotated-message.js\";\nimport { registerEchoTool } from \"./echo.js\";\nimport { registerGetEnvTool } from \"./get-env.js\";\nimport { registerGetResourceLinksTool } from \"./get-resource-links.js\";\nimport { registerGetResourceReferenceTool } from \"./get-resource-reference.js\";\nimport { registerGetRootsListTool } from \"./get-roots-list.js\";\nimport { registerGetStructuredContentTool } from \"./get-structured-content.js\";\nimport { registerGetSumTool } from \"./get-sum.js\";\nimport { registerGetTinyImageTool } from \"./get-tiny-image.js\";\nimport { registerGZipFileAsResourceTool } from \"./gzip-file-as-resource.js\";\nimport { registerToggleSimulatedLoggingTool } from \"./toggle-simulated-logging.js\";\nimport { registerToggleSubscriberUpdatesTool } from \"./toggle-subscriber-updates.js\";\nimport { registerTriggerElicitationRequestTool } from \"./trigger-elicitation-request.js\";\nimport { registerTriggerLongRunningOperationTool } from \"./trigger-long-running-operation.js\";\nimport { registerTriggerSamplingRequestTool } from \"./trigger-sampling-request.js\";\nimport { registerTriggerSamplingRequestAsyncTool } from \"./trigger-sampling-request-async.js\";\nimport { registerTriggerElicitationRequestAsyncTool } from \"./trigger-elicitation-request-async.js\";\nimport { registerSimulateResearchQueryTool } from \"./simulate-research-query.js\";\n\n/**\n * Register the tools with the MCP server.\n * @param server\n */\nexport const registerTools = (server: McpServer) => {\n  registerEchoTool(server);\n  registerGetAnnotatedMessageTool(server);\n  registerGetEnvTool(server);\n  registerGetResourceLinksTool(server);\n  registerGetResourceReferenceTool(server);\n  registerGetStructuredContentTool(server);\n  registerGetSumTool(server);\n  registerGetTinyImageTool(server);\n  registerGZipFileAsResourceTool(server);\n  
registerToggleSimulatedLoggingTool(server);\n  registerToggleSubscriberUpdatesTool(server);\n  registerTriggerLongRunningOperationTool(server);\n};\n\n/**\n * Register the tools that are conditional upon client capabilities.\n * These must be registered conditionally, after initialization.\n */\nexport const registerConditionalTools = (server: McpServer) => {\n  registerGetRootsListTool(server);\n  registerTriggerElicitationRequestTool(server);\n  registerTriggerSamplingRequestTool(server);\n  // Task-based research tool (uses experimental tasks API)\n  registerSimulateResearchQueryTool(server);\n  // Bidirectional task tools - server sends requests that client executes as tasks\n  registerTriggerSamplingRequestAsyncTool(server);\n  registerTriggerElicitationRequestAsyncTool(server);\n};\n"
  },
  {
    "path": "src/everything/tools/simulate-research-query.ts",
    "content": "import { z } from \"zod\";\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport {\n  CallToolResult,\n  GetTaskResult,\n  Task,\n  ElicitResult,\n  ElicitResultSchema,\n} from \"@modelcontextprotocol/sdk/types.js\";\nimport { CreateTaskResult } from \"@modelcontextprotocol/sdk/experimental/tasks\";\n\n// Tool input schema\nconst SimulateResearchQuerySchema = z.object({\n  topic: z.string().describe(\"The research topic to investigate\"),\n  ambiguous: z\n    .boolean()\n    .default(false)\n    .describe(\n      \"Simulate an ambiguous query that requires clarification (triggers input_required status)\"\n    ),\n});\n\n// Research stages\nconst STAGES = [\n  \"Gathering sources\",\n  \"Analyzing content\",\n  \"Synthesizing findings\",\n  \"Generating report\",\n];\n\n// Duration per stage in milliseconds\nconst STAGE_DURATION = 1000;\n\n// Internal state for tracking research tasks\ninterface ResearchState {\n  topic: string;\n  ambiguous: boolean;\n  currentStage: number;\n  clarification?: string;\n  completed: boolean;\n  result?: CallToolResult;\n}\n\n// Map to store research state per task\nconst researchStates = new Map<string, ResearchState>();\n\n/**\n * Runs the background research process.\n * Updates task status as it progresses through stages.\n * If clarification is needed, attempts elicitation via sendRequest.\n *\n * Note: Elicitation only works on STDIO transport. 
On HTTP transport,\n * sendRequest will fail and the task will use a default interpretation.\n * Full HTTP support requires SDK PR #1210's elicitInputStream API.\n */\nasync function runResearchProcess(\n  taskId: string,\n  args: z.infer<typeof SimulateResearchQuerySchema>,\n  taskStore: {\n    updateTaskStatus: (\n      taskId: string,\n      status: Task[\"status\"],\n      message?: string\n    ) => Promise<void>;\n    storeTaskResult: (\n      taskId: string,\n      status: \"completed\" | \"failed\",\n      result: CallToolResult\n    ) => Promise<void>;\n  },\n  // eslint-disable-next-line @typescript-eslint/no-explicit-any\n  sendRequest: any\n): Promise<void> {\n  const state = researchStates.get(taskId);\n  if (!state) return;\n\n  // Process each stage\n  for (let i = state.currentStage; i < STAGES.length; i++) {\n    state.currentStage = i;\n\n    // Check if task was cancelled externally\n    if (state.completed) return;\n\n    // Update status message for current stage\n    await taskStore.updateTaskStatus(taskId, \"working\", `${STAGES[i]}...`);\n\n    // At synthesis stage (index 2), check if clarification is needed\n    if (i === 2 && state.ambiguous && !state.clarification) {\n      // Update status to show we're requesting input (spec SHOULD)\n      await taskStore.updateTaskStatus(\n        taskId,\n        \"input_required\",\n        `Found multiple interpretations for \"${state.topic}\". Requesting clarification...`\n      );\n\n      try {\n        // Try elicitation via sendRequest (works on STDIO, fails on HTTP)\n        const elicitResult: ElicitResult = await sendRequest(\n          {\n            method: \"elicitation/create\",\n            params: {\n              message: `The research query \"${state.topic}\" could have multiple interpretations. 
Please clarify what you're looking for:`,\n              requestedSchema: {\n                type: \"object\",\n                properties: {\n                  interpretation: {\n                    type: \"string\",\n                    title: \"Clarification\",\n                    description:\n                      \"Which interpretation of the topic do you mean?\",\n                    oneOf: getInterpretationsForTopic(state.topic),\n                  },\n                },\n                required: [\"interpretation\"],\n              },\n            },\n          },\n          ElicitResultSchema\n        );\n\n        // Process elicitation response\n        if (elicitResult.action === \"accept\" && elicitResult.content) {\n          state.clarification =\n            (elicitResult.content as { interpretation?: string })\n              .interpretation || \"User accepted without selection\";\n        } else if (elicitResult.action === \"decline\") {\n          state.clarification = \"User declined - using default interpretation\";\n        } else {\n          state.clarification = \"User cancelled - using default interpretation\";\n        }\n      } catch (error) {\n        // Elicitation failed (likely HTTP transport without streaming support)\n        // Use default interpretation and continue - task should still complete\n        console.warn(\n          `Elicitation failed for task ${taskId} (HTTP transport?):`,\n          error instanceof Error ? 
error.message : String(error)\n        );\n        state.clarification =\n          \"technical (default - elicitation unavailable on HTTP)\";\n      }\n\n      // Resume with working status (spec SHOULD)\n      await taskStore.updateTaskStatus(\n        taskId,\n        \"working\",\n        `Continuing with interpretation: \"${state.clarification}\"...`\n      );\n\n      // Continue processing (no return - just keep going through the loop)\n    }\n\n    // Simulate work for this stage\n    await new Promise((resolve) => setTimeout(resolve, STAGE_DURATION));\n  }\n\n  // All stages complete - generate result\n  state.completed = true;\n  const result = generateResearchReport(state);\n  state.result = result;\n\n  await taskStore.storeTaskResult(taskId, \"completed\", result);\n}\n\n/**\n * Generates the final research report with educational content about tasks.\n */\nfunction generateResearchReport(state: ResearchState): CallToolResult {\n  const topic = state.clarification\n    ? `${state.topic} (${state.clarification})`\n    : state.topic;\n\n  const report = `# Research Report: ${topic}\n\n## Research Parameters\n- **Topic**: ${state.topic}\n${state.clarification ? `- **Clarification**: ${state.clarification}` : \"\"}\n\n## Synthesis\nThis research query was processed through ${STAGES.length} stages:\n${STAGES.map((s, i) => `- Stage ${i + 1}: ${s} ✓`).join(\"\\n\")}\n\n---\n\n## About This Demo (SEP-1686: Tasks)\n\nThis tool demonstrates MCP's task-based execution pattern for long-running operations:\n\n**Task Lifecycle Demonstrated:**\n1. \\`tools/call\\` with \\`task\\` parameter → Server returns \\`CreateTaskResult\\` (not the final result)\n2. Client polls \\`tasks/get\\` → Server returns current status and \\`statusMessage\\`\n3. Status progressed: \\`working\\` → ${\n    state.clarification ? `\\`input_required\\` → \\`working\\` → ` : \"\"\n  }\\`completed\\`\n4. 
Client calls \\`tasks/result\\` → Server returns this final result\n\n${\n  state.clarification\n    ? `**Elicitation Flow:**\nWhen the query was ambiguous, the server sent an \\`elicitation/create\\` request\nto the client. The task status changed to \\`input_required\\` while awaiting user input.\n${\n  state.clarification.includes(\"unavailable on HTTP\")\n    ? `\n**Note:** Elicitation was skipped because this server is running over HTTP transport.\nThe current SDK's \\`sendRequest\\` only works over STDIO. Full HTTP elicitation support\nrequires SDK PR #1210's streaming \\`elicitInputStream\\` API.\n`\n    : `After receiving clarification (\"${state.clarification}\"), the task resumed processing and completed.`\n}\n`\n    : \"\"\n}\n**Key Concepts:**\n- Tasks enable \"call now, fetch later\" patterns\n- \\`statusMessage\\` provides human-readable progress updates\n- Tasks have TTL (time-to-live) for automatic cleanup\n- \\`pollInterval\\` suggests how often to check status\n- Elicitation requests can be sent directly during task execution\n\n*This is a simulated research report from the Everything MCP Server.*\n`;\n\n  return {\n    content: [\n      {\n        type: \"text\",\n        text: report,\n      },\n    ],\n  };\n}\n\n/**\n * Registers the 'simulate-research-query' tool as a task-based tool.\n *\n * This tool demonstrates the MCP Tasks feature (SEP-1686) with a real-world scenario:\n * a research tool that gathers and synthesizes information from multiple sources.\n * If the query is ambiguous, it pauses to ask for clarification before completing.\n *\n * @param {McpServer} server - The McpServer instance where the tool will be registered.\n */\nexport const registerSimulateResearchQueryTool = (server: McpServer) => {\n  // Check if client supports elicitation (needed for input_required flow)\n  const clientCapabilities = server.server.getClientCapabilities() || {};\n  const clientSupportsElicitation: boolean =\n    clientCapabilities.elicitation 
!== undefined;\n\n  server.experimental.tasks.registerToolTask(\n    \"simulate-research-query\",\n    {\n      title: \"Simulate Research Query\",\n      description:\n        \"Simulates a deep research operation that gathers, analyzes, and synthesizes information. \" +\n        \"Demonstrates MCP task-based operations with progress through multiple stages. \" +\n        \"If 'ambiguous' is true and client supports elicitation, sends an elicitation request for clarification.\",\n      inputSchema: SimulateResearchQuerySchema,\n      execution: { taskSupport: \"required\" },\n    },\n    {\n      /**\n       * Creates a new research task and starts background processing.\n       */\n      createTask: async (args, extra): Promise<CreateTaskResult> => {\n        const validatedArgs = SimulateResearchQuerySchema.parse(args);\n\n        // Create the task in the store\n        const task = await extra.taskStore.createTask({\n          ttl: 300000, // 5 minutes\n          pollInterval: 1000,\n        });\n\n        // Initialize research state\n        const state: ResearchState = {\n          topic: validatedArgs.topic,\n          ambiguous: validatedArgs.ambiguous && clientSupportsElicitation,\n          currentStage: 0,\n          completed: false,\n        };\n        researchStates.set(task.taskId, state);\n\n        // Start background research (don't await - runs asynchronously)\n        // Pass sendRequest for elicitation (works on STDIO, gracefully degrades on HTTP)\n        runResearchProcess(\n          task.taskId,\n          validatedArgs,\n          extra.taskStore,\n          extra.sendRequest\n        ).catch((error) => {\n          console.error(`Research task ${task.taskId} failed:`, error);\n          extra.taskStore\n            .updateTaskStatus(task.taskId, \"failed\", String(error))\n            .catch(console.error);\n        });\n\n        return { task };\n      },\n\n      /**\n       * Returns the current status of the research task.\n       
*/\n      getTask: async (args, extra): Promise<GetTaskResult> => {\n        return await extra.taskStore.getTask(extra.taskId);\n      },\n\n      /**\n       * Returns the task result.\n       * Elicitation is now handled directly in the background process.\n       */\n      getTaskResult: async (args, extra): Promise<CallToolResult> => {\n        // Return the stored result\n        const result = await extra.taskStore.getTaskResult(extra.taskId);\n\n        // Clean up state\n        researchStates.delete(extra.taskId);\n\n        return result as CallToolResult;\n      },\n    }\n  );\n};\n\n/**\n * Returns contextual interpretation options based on the topic.\n */\nfunction getInterpretationsForTopic(\n  topic: string\n): Array<{ const: string; title: string }> {\n  const lowerTopic = topic.toLowerCase();\n\n  // Example: contextual interpretations for \"python\"\n  if (lowerTopic.includes(\"python\")) {\n    return [\n      { const: \"programming\", title: \"Python programming language\" },\n      { const: \"snake\", title: \"Python snake species\" },\n      { const: \"comedy\", title: \"Monty Python comedy group\" },\n    ];\n  }\n\n  // Default generic interpretations\n  return [\n    { const: \"technical\", title: \"Technical/scientific perspective\" },\n    { const: \"historical\", title: \"Historical perspective\" },\n    { const: \"current\", title: \"Current events/news perspective\" },\n  ];\n}\n"
  },
  {
    "path": "src/everything/tools/toggle-simulated-logging.ts",
    "content": "import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { CallToolResult } from \"@modelcontextprotocol/sdk/types.js\";\nimport {\n  beginSimulatedLogging,\n  stopSimulatedLogging,\n} from \"../server/logging.js\";\n\n// Tool configuration\nconst name = \"toggle-simulated-logging\";\nconst config = {\n  title: \"Toggle Simulated Logging\",\n  description: \"Toggles simulated, random-leveled logging on or off.\",\n  inputSchema: {},\n};\n\n// Track enabled clients by session id\nconst clients: Set<string | undefined> = new Set<string | undefined>();\n\n/**\n * Registers the `toggle-simulated-logging` tool.\n *\n * The registered tool enables or disables the sending of periodic, random-leveled\n * logging messages to the connected client.\n *\n * When invoked, it either starts or stops simulated logging based on the session's\n * current state. If logging for the specified session is active, it will be stopped;\n * if it is inactive, logging will be started.\n *\n * @param {McpServer} server - The McpServer instance where the tool will be registered.\n */\nexport const registerToggleSimulatedLoggingTool = (server: McpServer) => {\n  server.registerTool(\n    name,\n    config,\n    async (_args, extra): Promise<CallToolResult> => {\n      const sessionId = extra?.sessionId;\n\n      let response: string;\n      if (clients.has(sessionId)) {\n        stopSimulatedLogging(sessionId);\n        clients.delete(sessionId);\n        response = `Stopped simulated logging for session ${sessionId}`;\n      } else {\n        beginSimulatedLogging(server, sessionId);\n        clients.add(sessionId);\n        response = `Started simulated, random-leveled logging for session ${sessionId} at a 5 second pace. Client's selected logging level will be respected. If an interval elapses and the message to be sent is below the selected level, it will not be sent. Thus at higher chosen logging levels, messages should arrive further apart. 
`;\n      }\n\n      return {\n        content: [{ type: \"text\", text: `${response}` }],\n      };\n    }\n  );\n};\n"
  },
  {
    "path": "src/everything/tools/toggle-subscriber-updates.ts",
    "content": "import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { CallToolResult } from \"@modelcontextprotocol/sdk/types.js\";\nimport {\n  beginSimulatedResourceUpdates,\n  stopSimulatedResourceUpdates,\n} from \"../resources/subscriptions.js\";\n\n// Tool configuration\nconst name = \"toggle-subscriber-updates\";\nconst config = {\n  title: \"Toggle Subscriber Updates\",\n  description: \"Toggles simulated resource subscription updates on or off.\",\n  inputSchema: {},\n};\n\n// Track enabled clients by session id\nconst clients: Set<string | undefined> = new Set<string | undefined>();\n\n/**\n * Registers the `toggle-subscriber-updates` tool.\n *\n * The registered tool enables or disables the sending of periodic, simulated resource\n * update messages to the connected client for any subscriptions they have made.\n *\n * When invoked, it either starts or stops simulated resource updates based on the session's\n * current state. If simulated updates for the specified session are active, they will be stopped;\n * if they are inactive, simulated updates will be started.\n *\n * The response provides feedback indicating whether simulated updates were started or stopped,\n * including the session ID.\n *\n * @param {McpServer} server - The McpServer instance where the tool will be registered.\n */\nexport const registerToggleSubscriberUpdatesTool = (server: McpServer) => {\n  server.registerTool(\n    name,\n    config,\n    async (_args, extra): Promise<CallToolResult> => {\n      const sessionId = extra?.sessionId;\n\n      let response: string;\n      if (clients.has(sessionId)) {\n        stopSimulatedResourceUpdates(sessionId);\n        clients.delete(sessionId);\n        response = `Stopped simulated resource updates for session ${sessionId}`;\n      } else {\n        beginSimulatedResourceUpdates(server, sessionId);\n        clients.add(sessionId);\n        response = `Started simulated resource updated notifications for session 
${sessionId} at a 5 second pace. Client will receive updates for any resources the it is subscribed to.`;\n      }\n\n      return {\n        content: [{ type: \"text\", text: `${response}` }],\n      };\n    }\n  );\n};\n"
  },
  {
    "path": "src/everything/tools/trigger-elicitation-request-async.ts",
    "content": "import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { CallToolResult } from \"@modelcontextprotocol/sdk/types.js\";\nimport { z } from \"zod\";\n\n// Tool configuration\nconst name = \"trigger-elicitation-request-async\";\nconst config = {\n  title: \"Trigger Async Elicitation Request Tool\",\n  description:\n    \"Trigger an async elicitation request that the CLIENT executes as a background task. \" +\n    \"Demonstrates bidirectional MCP tasks where the server sends an elicitation request and \" +\n    \"the client handles user input asynchronously, allowing the server to poll for completion.\",\n  inputSchema: {},\n};\n\n// Poll interval in milliseconds\nconst POLL_INTERVAL = 1000;\n\n// Maximum poll attempts before timeout (10 minutes for user input)\nconst MAX_POLL_ATTEMPTS = 600;\n\n/**\n * Registers the 'trigger-elicitation-request-async' tool.\n *\n * This tool demonstrates bidirectional MCP tasks for elicitation:\n * - Server sends elicitation request to client with task metadata\n * - Client creates a task and returns CreateTaskResult\n * - Client prompts user for input (task status: input_required)\n * - Server polls client's tasks/get endpoint for status\n * - Server fetches final result from client's tasks/result endpoint\n *\n * @param {McpServer} server - The McpServer instance where the tool will be registered.\n */\nexport const registerTriggerElicitationRequestAsyncTool = (\n  server: McpServer\n) => {\n  // Check client capabilities\n  const clientCapabilities = server.server.getClientCapabilities() || {};\n\n  // Client must support elicitation AND tasks.requests.elicitation\n  const clientSupportsElicitation =\n    clientCapabilities.elicitation !== undefined;\n  const clientTasksCapability = clientCapabilities.tasks as\n    | {\n        requests?: { elicitation?: { create?: object } };\n      }\n    | undefined;\n  const clientSupportsAsyncElicitation =\n    
clientTasksCapability?.requests?.elicitation?.create !== undefined;\n\n  if (clientSupportsElicitation && clientSupportsAsyncElicitation) {\n    server.registerTool(\n      name,\n      config,\n      async (args, extra): Promise<CallToolResult> => {\n        // Create the elicitation request WITH task metadata\n        // Using z.any() schema to avoid complex type matching with _meta\n        const request = {\n          method: \"elicitation/create\" as const,\n          params: {\n            task: {\n              ttl: 600000, // 10 minutes (user input may take a while)\n            },\n            message:\n              \"Please provide inputs for the following fields (async task demo):\",\n            requestedSchema: {\n              type: \"object\" as const,\n              properties: {\n                name: {\n                  title: \"Your Name\",\n                  type: \"string\" as const,\n                  description: \"Your full name\",\n                },\n                favoriteColor: {\n                  title: \"Favorite Color\",\n                  type: \"string\" as const,\n                  description: \"What is your favorite color?\",\n                  enum: [\"Red\", \"Blue\", \"Green\", \"Yellow\", \"Purple\"],\n                },\n                agreeToTerms: {\n                  title: \"Terms Agreement\",\n                  type: \"boolean\" as const,\n                  description: \"Do you agree to the terms and conditions?\",\n                },\n              },\n              required: [\"name\"],\n            },\n          },\n        };\n\n        // Send the elicitation request\n        // Client may return either:\n        // - ElicitResult (synchronous execution)\n        // - CreateTaskResult (task-based execution with { task } object)\n        const elicitResponse = await extra.sendRequest(\n          request as Parameters<typeof extra.sendRequest>[0],\n          z.union([\n            // CreateTaskResult - client 
created a task\n            z.object({\n              task: z.object({\n                taskId: z.string(),\n                status: z.string(),\n                pollInterval: z.number().optional(),\n                statusMessage: z.string().optional(),\n              }),\n            }),\n            // ElicitResult - synchronous execution\n            z.object({\n              action: z.string(),\n              content: z.any().optional(),\n            }),\n          ])\n        );\n\n        // Check if client returned CreateTaskResult (has task object)\n        const isTaskResult = \"task\" in elicitResponse && elicitResponse.task;\n        if (!isTaskResult) {\n          // Client executed synchronously - return the direct response\n          return {\n            content: [\n              {\n                type: \"text\",\n                text: `[SYNC] Client executed synchronously:\\n${JSON.stringify(\n                  elicitResponse,\n                  null,\n                  2\n                )}`,\n              },\n            ],\n          };\n        }\n\n        const taskId = elicitResponse.task.taskId;\n        const statusMessages: string[] = [];\n        statusMessages.push(`Task created: ${taskId}`);\n\n        // Poll for task completion\n        let attempts = 0;\n        let taskStatus = elicitResponse.task.status;\n        let taskStatusMessage: string | undefined;\n\n        while (\n          taskStatus !== \"completed\" &&\n          taskStatus !== \"failed\" &&\n          taskStatus !== \"cancelled\" &&\n          attempts < MAX_POLL_ATTEMPTS\n        ) {\n          // Wait before polling\n          await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL));\n          attempts++;\n\n          // Get task status from client\n          const pollResult = await extra.sendRequest(\n            {\n              method: \"tasks/get\",\n              params: { taskId },\n            },\n            z\n              .object({\n        
        status: z.string(),\n                statusMessage: z.string().optional(),\n              })\n              .passthrough()\n          );\n\n          taskStatus = pollResult.status;\n          taskStatusMessage = pollResult.statusMessage;\n\n          // Only log status changes or every 10 polls to avoid spam\n          if (\n            attempts === 1 ||\n            attempts % 10 === 0 ||\n            taskStatus !== \"input_required\"\n          ) {\n            statusMessages.push(\n              `Poll ${attempts}: ${taskStatus}${\n                taskStatusMessage ? ` - ${taskStatusMessage}` : \"\"\n              }`\n            );\n          }\n        }\n\n        // Check for timeout\n        if (attempts >= MAX_POLL_ATTEMPTS) {\n          return {\n            content: [\n              {\n                type: \"text\",\n                text: `[TIMEOUT] Task timed out after ${MAX_POLL_ATTEMPTS} poll attempts\\n\\nProgress:\\n${statusMessages.join(\n                  \"\\n\"\n                )}`,\n              },\n            ],\n          };\n        }\n\n        // Check for failure/cancellation\n        if (taskStatus === \"failed\" || taskStatus === \"cancelled\") {\n          return {\n            content: [\n              {\n                type: \"text\",\n                text: `[${taskStatus.toUpperCase()}] ${\n                  taskStatusMessage || \"No message\"\n                }\\n\\nProgress:\\n${statusMessages.join(\"\\n\")}`,\n              },\n            ],\n          };\n        }\n\n        // Fetch the final result\n        const result = await extra.sendRequest(\n          {\n            method: \"tasks/result\",\n            params: { taskId },\n          },\n          z.any()\n        );\n\n        // Format the elicitation result\n        const content: CallToolResult[\"content\"] = [];\n\n        if (result.action === \"accept\" && result.content) {\n          content.push({\n            type: \"text\",\n            text: 
`[COMPLETED] User provided the requested information!`,\n          });\n\n          const userData = result.content as Record<string, unknown>;\n          const lines = [];\n          if (userData.name) lines.push(`- Name: ${userData.name}`);\n          if (userData.favoriteColor)\n            lines.push(`- Favorite Color: ${userData.favoriteColor}`);\n          if (userData.agreeToTerms !== undefined)\n            lines.push(`- Agreed to terms: ${userData.agreeToTerms}`);\n\n          content.push({\n            type: \"text\",\n            text: `User inputs:\\n${lines.join(\"\\n\")}`,\n          });\n        } else if (result.action === \"decline\") {\n          content.push({\n            type: \"text\",\n            text: `[DECLINED] User declined to provide the requested information.`,\n          });\n        } else if (result.action === \"cancel\") {\n          content.push({\n            type: \"text\",\n            text: `[CANCELLED] User cancelled the elicitation dialog.`,\n          });\n        }\n\n        // Include progress and raw result for debugging\n        content.push({\n          type: \"text\",\n          text: `\\nProgress:\\n${statusMessages.join(\n            \"\\n\"\n          )}\\n\\nRaw result: ${JSON.stringify(result, null, 2)}`,\n        });\n\n        return { content };\n      }\n    );\n  }\n};\n"
  },
  {
    "path": "src/everything/tools/trigger-elicitation-request.ts",
    "content": "import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport {\n  ElicitResultSchema,\n  CallToolResult,\n} from \"@modelcontextprotocol/sdk/types.js\";\n\n// Tool configuration\nconst name = \"trigger-elicitation-request\";\nconst config = {\n  title: \"Trigger Elicitation Request Tool\",\n  description: \"Trigger a Request from the Server for User Elicitation\",\n  inputSchema: {},\n};\n\n/**\n * Registers the 'trigger-elicitation-request' tool.\n *\n * If the client does not support the elicitation capability, the tool is not registered.\n *\n * The registered tool sends an elicitation request for the user to provide information\n * based on a pre-defined schema of fields including text inputs, booleans, numbers,\n * email, dates, enums of various types, etc. It uses validation and handles multiple\n * possible outcomes from the user's response, such as acceptance with content, decline,\n * or cancellation of the dialog. The process also ensures parsing and validating\n * the elicitation input arguments at runtime.\n *\n * The elicitation dialog response is returned, formatted into a structured result,\n * which contains both user-submitted input data (if provided) and debugging information,\n * including raw results.\n *\n * @param {McpServer} server - The McpServer instance where the tool will be registered.\n */\nexport const registerTriggerElicitationRequestTool = (server: McpServer) => {\n  // Does the client support elicitation?\n  const clientCapabilities = server.server.getClientCapabilities() || {};\n  const clientSupportsElicitation: boolean =\n    clientCapabilities.elicitation !== undefined;\n\n  // If so, register tool\n  if (clientSupportsElicitation) {\n    server.registerTool(\n      name,\n      config,\n      async (args, extra): Promise<CallToolResult> => {\n        const elicitationResult = await extra.sendRequest(\n          {\n            method: \"elicitation/create\",\n            params: {\n             
 message: \"Please provide inputs for the following fields:\",\n              requestedSchema: {\n                type: \"object\",\n                properties: {\n                  name: {\n                    title: \"String\",\n                    type: \"string\",\n                    description: \"Your full, legal name\",\n                  },\n                  check: {\n                    title: \"Boolean\",\n                    type: \"boolean\",\n                    description: \"Agree to the terms and conditions\",\n                  },\n                  firstLine: {\n                    title: \"String with default\",\n                    type: \"string\",\n                    description: \"Favorite first line of a story\",\n                    default: \"It was a dark and stormy night.\",\n                  },\n                  email: {\n                    title: \"String with email format\",\n                    type: \"string\",\n                    format: \"email\",\n                    description:\n                      \"Your email address (will be verified, and never shared with anyone else)\",\n                  },\n                  homepage: {\n                    type: \"string\",\n                    format: \"uri\",\n                    title: \"String with uri format\",\n                    description: \"Portfolio / personal website\",\n                  },\n                  birthdate: {\n                    title: \"String with date format\",\n                    type: \"string\",\n                    format: \"date\",\n                    description: \"Your date of birth\",\n                  },\n                  integer: {\n                    title: \"Integer\",\n                    type: \"integer\",\n                    description:\n                      \"Your favorite integer (do not give us your phone number, pin, or other sensitive info)\",\n                    minimum: 1,\n                    maximum: 100,\n         
           default: 42,\n                  },\n                  number: {\n                    title: \"Number in range 1-1000\",\n                    type: \"number\",\n                    description: \"Favorite number (there are no wrong answers)\",\n                    minimum: 0,\n                    maximum: 1000,\n                    default: 3.14,\n                  },\n                  untitledSingleSelectEnum: {\n                    type: \"string\",\n                    title: \"Untitled Single Select Enum\",\n                    description: \"Choose your favorite friend\",\n                    enum: [\n                      \"Monica\",\n                      \"Rachel\",\n                      \"Joey\",\n                      \"Chandler\",\n                      \"Ross\",\n                      \"Phoebe\",\n                    ],\n                    default: \"Monica\",\n                  },\n                  untitledMultipleSelectEnum: {\n                    type: \"array\",\n                    title: \"Untitled Multiple Select Enum\",\n                    description: \"Choose your favorite instruments\",\n                    minItems: 1,\n                    maxItems: 3,\n                    items: {\n                      type: \"string\",\n                      enum: [\"Guitar\", \"Piano\", \"Violin\", \"Drums\", \"Bass\"],\n                    },\n                    default: [\"Guitar\"],\n                  },\n                  titledSingleSelectEnum: {\n                    type: \"string\",\n                    title: \"Titled Single Select Enum\",\n                    description: \"Choose your favorite hero\",\n                    oneOf: [\n                      { const: \"hero-1\", title: \"Superman\" },\n                      { const: \"hero-2\", title: \"Green Lantern\" },\n                      { const: \"hero-3\", title: \"Wonder Woman\" },\n                    ],\n                    default: \"hero-1\",\n                  },\n     
             titledMultipleSelectEnum: {\n                    type: \"array\",\n                    title: \"Titled Multiple Select Enum\",\n                    description: \"Choose your favorite types of fish\",\n                    minItems: 1,\n                    maxItems: 3,\n                    items: {\n                      anyOf: [\n                        { const: \"fish-1\", title: \"Tuna\" },\n                        { const: \"fish-2\", title: \"Salmon\" },\n                        { const: \"fish-3\", title: \"Trout\" },\n                      ],\n                    },\n                    default: [\"fish-1\"],\n                  },\n                  legacyTitledEnum: {\n                    type: \"string\",\n                    title: \"Legacy Titled Single Select Enum\",\n                    description: \"Choose your favorite type of pet\",\n                    enum: [\"pet-1\", \"pet-2\", \"pet-3\", \"pet-4\", \"pet-5\"],\n                    enumNames: [\"Cats\", \"Dogs\", \"Birds\", \"Fish\", \"Reptiles\"],\n                    default: \"pet-1\",\n                  },\n                },\n                required: [\"name\"],\n              },\n            },\n          },\n          ElicitResultSchema,\n          { timeout: 10 * 60 * 1000 /* 10 minutes */ }\n        );\n\n        // Handle different response actions\n        const content: CallToolResult[\"content\"] = [];\n\n        if (\n          elicitationResult.action === \"accept\" &&\n          elicitationResult.content\n        ) {\n          content.push({\n            type: \"text\",\n            text: `✅ User provided the requested information!`,\n          });\n\n          // Only access elicitationResult.content when action is accept\n          const userData = elicitationResult.content;\n          const lines = [];\n          if (userData.name) lines.push(`- Name: ${userData.name}`);\n          if (userData.check !== undefined)\n            lines.push(`- Agreed to terms: 
${userData.check}`);\n          if (userData.color) lines.push(`- Favorite Color: ${userData.color}`);\n          if (userData.email) lines.push(`- Email: ${userData.email}`);\n          if (userData.homepage) lines.push(`- Homepage: ${userData.homepage}`);\n          if (userData.birthdate)\n            lines.push(`- Birthdate: ${userData.birthdate}`);\n          if (userData.integer !== undefined)\n            lines.push(`- Favorite Integer: ${userData.integer}`);\n          if (userData.number !== undefined)\n            lines.push(`- Favorite Number: ${userData.number}`);\n          if (userData.petType) lines.push(`- Pet Type: ${userData.petType}`);\n\n          content.push({\n            type: \"text\",\n            text: `User inputs:\\n${lines.join(\"\\n\")}`,\n          });\n        } else if (elicitationResult.action === \"decline\") {\n          content.push({\n            type: \"text\",\n            text: `❌ User declined to provide the requested information.`,\n          });\n        } else if (elicitationResult.action === \"cancel\") {\n          content.push({\n            type: \"text\",\n            text: `⚠️ User cancelled the elicitation dialog.`,\n          });\n        }\n\n        // Include raw result for debugging\n        content.push({\n          type: \"text\",\n          text: `\\nRaw result: ${JSON.stringify(elicitationResult, null, 2)}`,\n        });\n\n        return { content };\n      }\n    );\n  }\n};\n"
  },
  {
    "path": "src/everything/tools/trigger-long-running-operation.ts",
    "content": "import { z } from \"zod\";\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { CallToolResult } from \"@modelcontextprotocol/sdk/types.js\";\n\n// Tool input schema\nconst TriggerLongRunningOperationSchema = z.object({\n  duration: z\n    .number()\n    .default(10)\n    .describe(\"Duration of the operation in seconds\"),\n  steps: z.number().default(5).describe(\"Number of steps in the operation\"),\n});\n\n// Tool configuration\nconst name = \"trigger-long-running-operation\";\nconst config = {\n  title: \"Trigger Long Running Operation Tool\",\n  description: \"Demonstrates a long running operation with progress updates.\",\n  inputSchema: TriggerLongRunningOperationSchema,\n};\n\n/**\n * Registers the 'trigger-long-running-operation' tool.\n *\n * The registered tool starts a long-running operation defined by a specific duration and\n * number of steps.\n *\n * Progress notifications are sent back to the client at each step if a `progressToken`\n * is provided in the metadata.\n *\n * At the end of the operation, the tool returns a message indicating the completion of the\n * operation, including the total duration and steps.\n *\n * @param {McpServer} server - The McpServer instance where the tool will be registered.\n */\nexport const registerTriggerLongRunningOperationTool = (server: McpServer) => {\n  server.registerTool(\n    name,\n    config,\n    async (args, extra): Promise<CallToolResult> => {\n      const validatedArgs = TriggerLongRunningOperationSchema.parse(args);\n      const { duration, steps } = validatedArgs;\n      const stepDuration = duration / steps;\n      const progressToken = extra._meta?.progressToken;\n\n      for (let i = 1; i < steps + 1; i++) {\n        await new Promise((resolve) =>\n          setTimeout(resolve, stepDuration * 1000)\n        );\n\n        if (progressToken !== undefined) {\n          await server.server.notification(\n            {\n              method: 
\"notifications/progress\",\n              params: {\n                progress: i,\n                total: steps,\n                progressToken,\n              },\n            },\n            { relatedRequestId: extra.requestId }\n          );\n        }\n      }\n\n      return {\n        content: [\n          {\n            type: \"text\",\n            text: `Long running operation completed. Duration: ${duration} seconds, Steps: ${steps}.`,\n          },\n        ],\n      };\n    }\n  );\n};\n"
  },
  {
    "path": "src/everything/tools/trigger-sampling-request-async.ts",
    "content": "import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport {\n  CallToolResult,\n  CreateMessageRequest,\n} from \"@modelcontextprotocol/sdk/types.js\";\nimport { z } from \"zod\";\n\n// Tool input schema\nconst TriggerSamplingRequestAsyncSchema = z.object({\n  prompt: z.string().describe(\"The prompt to send to the LLM\"),\n  maxTokens: z\n    .number()\n    .default(100)\n    .describe(\"Maximum number of tokens to generate\"),\n});\n\n// Tool configuration\nconst name = \"trigger-sampling-request-async\";\nconst config = {\n  title: \"Trigger Async Sampling Request Tool\",\n  description:\n    \"Trigger an async sampling request that the CLIENT executes as a background task. \" +\n    \"Demonstrates bidirectional MCP tasks where the server sends a request and the client \" +\n    \"executes it asynchronously, allowing the server to poll for progress and results.\",\n  inputSchema: TriggerSamplingRequestAsyncSchema,\n};\n\n// Poll interval in milliseconds\nconst POLL_INTERVAL = 1000;\n\n// Maximum poll attempts before timeout\nconst MAX_POLL_ATTEMPTS = 60;\n\n/**\n * Registers the 'trigger-sampling-request-async' tool.\n *\n * This tool demonstrates bidirectional MCP tasks:\n * - Server sends sampling request to client with task metadata\n * - Client creates a task and returns CreateTaskResult\n * - Server polls client's tasks/get endpoint for status\n * - Server fetches final result from client's tasks/result endpoint\n *\n * @param {McpServer} server - The McpServer instance where the tool will be registered.\n */\nexport const registerTriggerSamplingRequestAsyncTool = (server: McpServer) => {\n  // Check client capabilities\n  const clientCapabilities = server.server.getClientCapabilities() || {};\n\n  // Client must support sampling AND tasks.requests.sampling\n  const clientSupportsSampling = clientCapabilities.sampling !== undefined;\n  const clientTasksCapability = clientCapabilities.tasks as\n    | {\n        requests?: 
{ sampling?: { createMessage?: object } };\n      }\n    | undefined;\n  const clientSupportsAsyncSampling =\n    clientTasksCapability?.requests?.sampling?.createMessage !== undefined;\n\n  if (clientSupportsSampling && clientSupportsAsyncSampling) {\n    server.registerTool(\n      name,\n      config,\n      async (args, extra): Promise<CallToolResult> => {\n        const validatedArgs = TriggerSamplingRequestAsyncSchema.parse(args);\n        const { prompt, maxTokens } = validatedArgs;\n\n        // Create the sampling request WITH task metadata\n        // The params.task field signals to the client that this should be executed as a task\n        const request: CreateMessageRequest & {\n          params: { task?: { ttl: number } };\n        } = {\n          method: \"sampling/createMessage\",\n          params: {\n            task: {\n              ttl: 300000, // 5 minutes\n            },\n            messages: [\n              {\n                role: \"user\",\n                content: {\n                  type: \"text\",\n                  text: `Resource ${name} context: ${prompt}`,\n                },\n              },\n            ],\n            systemPrompt: \"You are a helpful test server.\",\n            maxTokens,\n            temperature: 0.7,\n          },\n        };\n\n        // Send the sampling request\n        // Client may return either:\n        // - CreateMessageResult (synchronous execution)\n        // - CreateTaskResult (task-based execution with { task } object)\n        const samplingResponse = await extra.sendRequest(\n          request,\n          z.union([\n            // CreateTaskResult - client created a task\n            z.object({\n              task: z.object({\n                taskId: z.string(),\n                status: z.string(),\n                pollInterval: z.number().optional(),\n                statusMessage: z.string().optional(),\n              }),\n            }),\n            // CreateMessageResult - 
synchronous execution\n            z.object({\n              role: z.string(),\n              content: z.any(),\n              model: z.string(),\n              stopReason: z.string().optional(),\n            }),\n          ])\n        );\n\n        // Check if client returned CreateTaskResult (has task object)\n        const isTaskResult =\n          \"task\" in samplingResponse && samplingResponse.task;\n        if (!isTaskResult) {\n          // Client executed synchronously - return the direct response\n          return {\n            content: [\n              {\n                type: \"text\",\n                text: `[SYNC] Client executed synchronously:\\n${JSON.stringify(\n                  samplingResponse,\n                  null,\n                  2\n                )}`,\n              },\n            ],\n          };\n        }\n\n        const taskId = samplingResponse.task.taskId;\n        const statusMessages: string[] = [];\n        statusMessages.push(`Task created: ${taskId}`);\n\n        // Poll for task completion\n        let attempts = 0;\n        let taskStatus = samplingResponse.task.status;\n        let taskStatusMessage: string | undefined;\n\n        while (\n          taskStatus !== \"completed\" &&\n          taskStatus !== \"failed\" &&\n          taskStatus !== \"cancelled\" &&\n          attempts < MAX_POLL_ATTEMPTS\n        ) {\n          // Wait before polling\n          await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL));\n          attempts++;\n\n          // Get task status from client\n          const pollResult = await extra.sendRequest(\n            {\n              method: \"tasks/get\",\n              params: { taskId },\n            },\n            z\n              .object({\n                status: z.string(),\n                statusMessage: z.string().optional(),\n              })\n              .passthrough()\n          );\n\n          taskStatus = pollResult.status;\n          taskStatusMessage = 
pollResult.statusMessage;\n          statusMessages.push(\n            `Poll ${attempts}: ${taskStatus}${\n              taskStatusMessage ? ` - ${taskStatusMessage}` : \"\"\n            }`\n          );\n        }\n\n        // Check for timeout\n        if (attempts >= MAX_POLL_ATTEMPTS) {\n          return {\n            content: [\n              {\n                type: \"text\",\n                text: `[TIMEOUT] Task timed out after ${MAX_POLL_ATTEMPTS} poll attempts\\n\\nProgress:\\n${statusMessages.join(\n                  \"\\n\"\n                )}`,\n              },\n            ],\n          };\n        }\n\n        // Check for failure/cancellation\n        if (taskStatus === \"failed\" || taskStatus === \"cancelled\") {\n          return {\n            content: [\n              {\n                type: \"text\",\n                text: `[${taskStatus.toUpperCase()}] ${\n                  taskStatusMessage || \"No message\"\n                }\\n\\nProgress:\\n${statusMessages.join(\"\\n\")}`,\n              },\n            ],\n          };\n        }\n\n        // Fetch the final result\n        const result = await extra.sendRequest(\n          {\n            method: \"tasks/result\",\n            params: { taskId },\n          },\n          z.any()\n        );\n\n        // Return the result with status history\n        return {\n          content: [\n            {\n              type: \"text\",\n              text: `[COMPLETED] Async sampling completed!\\n\\n**Progress:**\\n${statusMessages.join(\n                \"\\n\"\n              )}\\n\\n**Result:**\\n${JSON.stringify(result, null, 2)}`,\n            },\n          ],\n        };\n      }\n    );\n  }\n};\n"
  },
  {
    "path": "src/everything/tools/trigger-sampling-request.ts",
    "content": "import { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport {\n  CallToolResult,\n  CreateMessageRequest,\n  CreateMessageResultSchema,\n} from \"@modelcontextprotocol/sdk/types.js\";\nimport { z } from \"zod\";\n\n// Tool input schema\nconst TriggerSamplingRequestSchema = z.object({\n  prompt: z.string().describe(\"The prompt to send to the LLM\"),\n  maxTokens: z\n    .number()\n    .default(100)\n    .describe(\"Maximum number of tokens to generate\"),\n});\n\n// Tool configuration\nconst name = \"trigger-sampling-request\";\nconst config = {\n  title: \"Trigger Sampling Request Tool\",\n  description: \"Trigger a Request from the Server for LLM Sampling\",\n  inputSchema: TriggerSamplingRequestSchema,\n};\n\n/**\n * Registers the 'trigger-sampling-request' tool.\n *\n * If the client does not support the sampling capability, the tool is not registered.\n *\n * The registered tool performs the following operations:\n * - Validates incoming arguments using `TriggerSamplingRequestSchema`.\n * - Constructs a `sampling/createMessage` request object using provided prompt and maximum tokens.\n * - Sends the request to the server for sampling.\n * - Formats and returns the sampling result content to the client.\n *\n * @param {McpServer} server - The McpServer instance where the tool will be registered.\n */\nexport const registerTriggerSamplingRequestTool = (server: McpServer) => {\n  // Does the client support sampling?\n  const clientCapabilities = server.server.getClientCapabilities() || {};\n  const clientSupportsSampling: boolean =\n    clientCapabilities.sampling !== undefined;\n\n  // If so, register tool\n  if (clientSupportsSampling) {\n    server.registerTool(\n      name,\n      config,\n      async (args, extra): Promise<CallToolResult> => {\n        const validatedArgs = TriggerSamplingRequestSchema.parse(args);\n        const { prompt, maxTokens } = validatedArgs;\n\n        // Create the sampling request\n        const 
request: CreateMessageRequest = {\n          method: \"sampling/createMessage\",\n          params: {\n            messages: [\n              {\n                role: \"user\",\n                content: {\n                  type: \"text\",\n                  text: `Resource ${name} context: ${prompt}`,\n                },\n              },\n            ],\n            systemPrompt: \"You are a helpful test server.\",\n            maxTokens,\n            temperature: 0.7,\n          },\n        };\n\n        // Send the sampling request to the client\n        const result = await extra.sendRequest(\n          request,\n          CreateMessageResultSchema\n        );\n\n        // Return the result to the client\n        return {\n          content: [\n            {\n              type: \"text\",\n              text: `LLM sampling result: \\n${JSON.stringify(result, null, 2)}`,\n            },\n          ],\n        };\n      }\n    );\n  }\n};\n"
  },
  {
    "path": "src/everything/transports/sse.ts",
    "content": "import { SSEServerTransport } from \"@modelcontextprotocol/sdk/server/sse.js\";\nimport express from \"express\";\nimport { createServer } from \"../server/index.js\";\nimport cors from \"cors\";\n\nconsole.error(\"Starting SSE server...\");\n\n// Express app with permissive CORS for testing with Inspector direct connect mode\nconst app = express();\napp.use(\n  cors({\n    origin: \"*\", // use \"*\" with caution in production\n    methods: \"GET,POST\",\n    preflightContinue: false,\n    optionsSuccessStatus: 204,\n  })\n);\n\n// Map sessionId to transport for each client\nconst transports: Map<string, SSEServerTransport> = new Map<\n  string,\n  SSEServerTransport\n>();\n\n// Handle GET requests for new SSE streams\napp.get(\"/sse\", async (req, res) => {\n  let transport: SSEServerTransport;\n  const { server, cleanup } = createServer();\n\n  // Session Id should not exist for GET /sse requests\n  if (req?.query?.sessionId) {\n    const sessionId = req?.query?.sessionId as string;\n    transport = transports.get(sessionId) as SSEServerTransport;\n    console.error(\n      \"Client Reconnecting? 
This shouldn't happen; when client has a sessionId, GET /sse should not be called again.\",\n      transport.sessionId\n    );\n  } else {\n    // Create and store transport for the new session\n    transport = new SSEServerTransport(\"/message\", res);\n    transports.set(transport.sessionId, transport);\n\n    // Connect server to transport\n    await server.connect(transport);\n    const sessionId = transport.sessionId;\n    console.error(\"Client Connected: \", sessionId);\n\n    // Handle close of connection\n    server.server.onclose = async () => {\n      const sessionId = transport.sessionId;\n      console.error(\"Client Disconnected: \", sessionId);\n      transports.delete(sessionId);\n      cleanup(sessionId);\n    };\n  }\n});\n\n// Handle POST requests for client messages\napp.post(\"/message\", async (req, res) => {\n  // Session Id should exist for POST /message requests\n  const sessionId = req?.query?.sessionId as string;\n\n  // Get the transport for this session and use it to handle the request\n  const transport = transports.get(sessionId);\n  if (transport) {\n    console.error(\"Client Message from\", sessionId);\n    await transport.handlePostMessage(req, res);\n  } else {\n    console.error(`No transport found for sessionId ${sessionId}`);\n  }\n});\n\n// Start the express server\nconst PORT = process.env.PORT || 3001;\napp.listen(PORT, () => {\n  console.error(`Server is running on port ${PORT}`);\n});\n"
  },
  {
    "path": "src/everything/transports/stdio.ts",
    "content": "#!/usr/bin/env node\n\nimport { StdioServerTransport } from \"@modelcontextprotocol/sdk/server/stdio.js\";\nimport { createServer } from \"../server/index.js\";\n\nconsole.error(\"Starting default (STDIO) server...\");\n\n/**\n * The main method\n * - Initializes the StdioServerTransport, sets up the server,\n * - Handles cleanup on process exit.\n *\n * @return {Promise<void>} A promise that resolves when the main function has executed and the process exits.\n */\nasync function main(): Promise<void> {\n  const transport = new StdioServerTransport();\n  const { server, cleanup } = createServer();\n\n  // Connect transport to server\n  await server.connect(transport);\n\n  // Cleanup on exit\n  process.on(\"SIGINT\", async () => {\n    await server.close();\n    cleanup();\n    process.exit(0);\n  });\n}\n\nmain().catch((error) => {\n  console.error(\"Server error:\", error);\n  process.exit(1);\n});\n"
  },
  {
    "path": "src/everything/transports/streamableHttp.ts",
    "content": "import {\n  StreamableHTTPServerTransport,\n  EventStore,\n} from \"@modelcontextprotocol/sdk/server/streamableHttp.js\";\nimport express, { Request, Response } from \"express\";\nimport { createServer } from \"../server/index.js\";\nimport { randomUUID } from \"node:crypto\";\nimport cors from \"cors\";\n\n// Simple in-memory event store for SSE resumability\nclass InMemoryEventStore implements EventStore {\n  private events: Map<string, { streamId: string; message: unknown }> =\n    new Map();\n\n  async storeEvent(streamId: string, message: unknown): Promise<string> {\n    const eventId = randomUUID();\n    this.events.set(eventId, { streamId, message });\n    return eventId;\n  }\n\n  async replayEventsAfter(\n    lastEventId: string,\n    { send }: { send: (eventId: string, message: unknown) => Promise<void> }\n  ): Promise<string> {\n    const entries = Array.from(this.events.entries());\n    const startIndex = entries.findIndex(([id]) => id === lastEventId);\n    if (startIndex === -1) return lastEventId;\n\n    let lastId: string = lastEventId;\n    for (let i = startIndex + 1; i < entries.length; i++) {\n      const [eventId, { message }] = entries[i];\n      await send(eventId, message);\n      lastId = eventId;\n    }\n    return lastId;\n  }\n}\n\nconsole.log(\"Starting Streamable HTTP server...\");\n\n// Express app with permissive CORS for testing with Inspector direct connect mode\nconst app = express();\napp.use(\n  cors({\n    origin: \"*\", // use \"*\" with caution in production\n    methods: \"GET,POST,DELETE\",\n    preflightContinue: false,\n    optionsSuccessStatus: 204,\n    exposedHeaders: [\"mcp-session-id\", \"last-event-id\", \"mcp-protocol-version\"],\n  })\n);\n\n// Map sessionId to server transport for each client\nconst transports: Map<string, StreamableHTTPServerTransport> = new Map<\n  string,\n  StreamableHTTPServerTransport\n>();\n\n// Handle POST requests for client messages\napp.post(\"/mcp\", async (req: 
Request, res: Response) => {\n  console.log(\"Received MCP POST request\");\n  try {\n    // Check for existing session ID\n    const sessionId = req.headers[\"mcp-session-id\"] as string | undefined;\n\n    let transport: StreamableHTTPServerTransport;\n\n    if (sessionId && transports.has(sessionId)) {\n      // Reuse existing transport\n      transport = transports.get(sessionId)!;\n    } else if (!sessionId) {\n      const { server, cleanup } = createServer();\n\n      // New initialization request\n      const eventStore = new InMemoryEventStore();\n      transport = new StreamableHTTPServerTransport({\n        sessionIdGenerator: () => randomUUID(),\n        eventStore, // Enable resumability\n        onsessioninitialized: (sessionId: string) => {\n          // Store the transport by session ID when a session is initialized\n          // This avoids race conditions where requests might come in before the session is stored\n          console.log(`Session initialized with ID: ${sessionId}`);\n          transports.set(sessionId, transport);\n        },\n      });\n\n      // Set up onclose handler to clean up transport when closed\n      server.server.onclose = async () => {\n        const sid = transport.sessionId;\n        if (sid && transports.has(sid)) {\n          console.log(\n            `Transport closed for session ${sid}, removing from transports map`\n          );\n          transports.delete(sid);\n          cleanup(sid);\n        }\n      };\n\n      // Connect the transport to the MCP server BEFORE handling the request\n      // so responses can flow back through the same transport\n      await server.connect(transport);\n      await transport.handleRequest(req, res);\n      return;\n    } else {\n      // Invalid request - no session ID or not initialization request\n      res.status(400).json({\n        jsonrpc: \"2.0\",\n        error: {\n          code: -32000,\n          message: \"Bad Request: No valid session ID provided\",\n        },\n    
    id: req?.body?.id,\n      });\n      return;\n    }\n\n    // Handle the request with existing transport - no need to reconnect\n    // The existing transport is already connected to the server\n    await transport.handleRequest(req, res);\n  } catch (error) {\n    console.log(\"Error handling MCP request:\", error);\n    if (!res.headersSent) {\n      res.status(500).json({\n        jsonrpc: \"2.0\",\n        error: {\n          code: -32603,\n          message: \"Internal server error\",\n        },\n        id: req?.body?.id,\n      });\n      return;\n    }\n  }\n});\n\n// Handle GET requests for SSE streams\napp.get(\"/mcp\", async (req: Request, res: Response) => {\n  console.log(\"Received MCP GET request\");\n  const sessionId = req.headers[\"mcp-session-id\"] as string | undefined;\n  if (!sessionId || !transports.has(sessionId)) {\n    res.status(400).json({\n      jsonrpc: \"2.0\",\n      error: {\n        code: -32000,\n        message: \"Bad Request: No valid session ID provided\",\n      },\n      id: req?.body?.id,\n    });\n    return;\n  }\n\n  // Check for Last-Event-ID header for resumability\n  const lastEventId = req.headers[\"last-event-id\"] as string | undefined;\n  if (lastEventId) {\n    console.log(`Client reconnecting with Last-Event-ID: ${lastEventId}`);\n  } else {\n    console.log(`Establishing new SSE stream for session ${sessionId}`);\n  }\n\n  const transport = transports.get(sessionId);\n  await transport!.handleRequest(req, res);\n});\n\n// Handle DELETE requests for session termination\napp.delete(\"/mcp\", async (req: Request, res: Response) => {\n  const sessionId = req.headers[\"mcp-session-id\"] as string | undefined;\n  if (!sessionId || !transports.has(sessionId)) {\n    res.status(400).json({\n      jsonrpc: \"2.0\",\n      error: {\n        code: -32000,\n        message: \"Bad Request: No valid session ID provided\",\n      },\n      id: req?.body?.id,\n    });\n    return;\n  }\n\n  console.log(`Received session 
termination request for session ${sessionId}`);\n\n  try {\n    const transport = transports.get(sessionId);\n    await transport!.handleRequest(req, res);\n  } catch (error) {\n    console.log(\"Error handling session termination:\", error);\n    if (!res.headersSent) {\n      res.status(500).json({\n        jsonrpc: \"2.0\",\n        error: {\n          code: -32603,\n          message: \"Error handling session termination\",\n        },\n        id: req?.body?.id,\n      });\n      return;\n    }\n  }\n});\n\n// Start the server\nconst PORT = process.env.PORT || 3001;\nconst server = app.listen(PORT, () => {\n  console.error(`MCP Streamable HTTP Server listening on port ${PORT}`);\n});\n\n// Handle server errors\nserver.on(\"error\", (err: unknown) => {\n  const code =\n    typeof err === \"object\" && err !== null && \"code\" in err\n      ? (err as { code?: unknown }).code\n      : undefined;\n  if (code === \"EADDRINUSE\") {\n    console.error(\n      `Failed to start: Port ${PORT} is already in use. Set PORT to a free port or stop the conflicting process.`\n    );\n  } else {\n    console.error(\"HTTP server encountered an error while starting:\", err);\n  }\n  // Ensure a non-zero exit so npm reports the failure instead of silently exiting\n  process.exit(1);\n});\n\n// Handle server shutdown\nprocess.on(\"SIGINT\", async () => {\n  console.log(\"Shutting down server...\");\n\n  // Close all active transports to properly clean up resources\n  for (const sessionId of transports.keys()) {\n    try {\n      console.log(`Closing transport for session ${sessionId}`);\n      await transports.get(sessionId)!.close();\n      transports.delete(sessionId);\n    } catch (error) {\n      console.log(`Error closing transport for session ${sessionId}:`, error);\n    }\n  }\n\n  console.log(\"Server shutdown complete\");\n  process.exit(0);\n});\n"
  },
  {
    "path": "src/everything/tsconfig.json",
    "content": "{\n  \"extends\": \"../../tsconfig.json\",\n  \"compilerOptions\": {\n    \"outDir\": \"./dist\",\n    \"rootDir\": \".\"\n  },\n  \"include\": [\"./**/*.ts\"]\n}\n"
  },
  {
    "path": "src/everything/vitest.config.ts",
    "content": "import { defineConfig } from 'vitest/config';\n\nexport default defineConfig({\n  test: {\n    globals: true,\n    environment: 'node',\n    include: ['**/__tests__/**/*.test.ts'],\n    coverage: {\n      provider: 'v8',\n      include: ['**/*.ts'],\n      exclude: ['**/__tests__/**', '**/dist/**'],\n    },\n  },\n});\n"
  },
  {
    "path": "src/fetch/.python-version",
    "content": "3.11\n"
  },
  {
    "path": "src/fetch/Dockerfile",
    "content": "# Use a Python image with uv pre-installed\nFROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim AS uv\n\n# Install the project into `/app`\nWORKDIR /app\n\n# Enable bytecode compilation\nENV UV_COMPILE_BYTECODE=1\n\n# Copy from the cache instead of linking since it's a mounted volume\nENV UV_LINK_MODE=copy\n\n# Install the project's dependencies using the lockfile and settings\nRUN --mount=type=cache,target=/root/.cache/uv \\\n    --mount=type=bind,source=uv.lock,target=uv.lock \\\n    --mount=type=bind,source=pyproject.toml,target=pyproject.toml \\\n    uv sync --locked --no-install-project --no-dev --no-editable\n\n# Then, add the rest of the project source code and install it\n# Installing separately from its dependencies allows optimal layer caching\nADD . /app\nRUN --mount=type=cache,target=/root/.cache/uv \\\n    uv sync --locked --no-dev --no-editable\n\nFROM python:3.12-slim-bookworm\n\nWORKDIR /app\n \nCOPY --from=uv /root/.local /root/.local\nCOPY --from=uv --chown=app:app /app/.venv /app/.venv\n\n# Place executables in the environment at the front of the path\nENV PATH=\"/app/.venv/bin:$PATH\"\n\n# when running the container, add --db-path and a bind mount to the host's db file\nENTRYPOINT [\"mcp-server-fetch\"]\n"
  },
  {
    "path": "src/fetch/LICENSE",
    "content": "Copyright (c) 2024 Anthropic, PBC.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
  },
  {
    "path": "src/fetch/README.md",
    "content": "# Fetch MCP Server\n\n<!-- mcp-name: io.github.modelcontextprotocol/server-fetch -->\n\nA Model Context Protocol server that provides web content fetching capabilities. This server enables LLMs to retrieve and process content from web pages, converting HTML to markdown for easier consumption.\n\n> [!CAUTION]\n> This server can access local/internal IP addresses and may represent a security risk. Exercise caution when using this MCP server to ensure this does not expose any sensitive data.\n\nThe fetch tool will truncate the response, but by using the `start_index` argument, you can specify where to start the content extraction. This lets models read a webpage in chunks, until they find the information they need.\n\n### Available Tools\n\n- `fetch` - Fetches a URL from the internet and extracts its contents as markdown.\n    - `url` (string, required): URL to fetch\n    - `max_length` (integer, optional): Maximum number of characters to return (default: 5000)\n    - `start_index` (integer, optional): Start content from this character index (default: 0)\n    - `raw` (boolean, optional): Get raw content without markdown conversion (default: false)\n\n### Prompts\n\n- **fetch**\n  - Fetch a URL and extract its contents as markdown\n  - Arguments:\n    - `url` (string, required): URL to fetch\n\n## Installation\n\nOptionally: Install node.js, this will cause the fetch server to use a different HTML simplifier that is more robust.\n\n### Using uv (recommended)\n\nWhen using [`uv`](https://docs.astral.sh/uv/) no specific installation is needed. 
We will\nuse [`uvx`](https://docs.astral.sh/uv/guides/tools/) to directly run *mcp-server-fetch*.\n\n### Using PIP\n\nAlternatively you can install `mcp-server-fetch` via pip:\n\n```\npip install mcp-server-fetch\n```\n\nAfter installation, you can run it as a script using:\n\n```\npython -m mcp_server_fetch\n```\n\n## Configuration\n\n### Configure for Claude.app\n\nAdd to your Claude settings:\n\n<details>\n<summary>Using uvx</summary>\n\n```json\n{\n  \"mcpServers\": {\n    \"fetch\": {\n      \"command\": \"uvx\",\n      \"args\": [\"mcp-server-fetch\"]\n    }\n  }\n}\n```\n</details>\n\n<details>\n<summary>Using docker</summary>\n\n```json\n{\n  \"mcpServers\": {\n    \"fetch\": {\n      \"command\": \"docker\",\n      \"args\": [\"run\", \"-i\", \"--rm\", \"mcp/fetch\"]\n    }\n  }\n}\n```\n</details>\n\n<details>\n<summary>Using pip installation</summary>\n\n```json\n{\n  \"mcpServers\": {\n    \"fetch\": {\n      \"command\": \"python\",\n      \"args\": [\"-m\", \"mcp_server_fetch\"]\n    }\n  }\n}\n```\n</details>\n\n### Configure for VS Code\n\nFor quick installation, use one of the one-click install buttons below...\n\n[![Install with UV in VS Code](https://img.shields.io/badge/VS_Code-UV-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=fetch&config=%7B%22command%22%3A%22uvx%22%2C%22args%22%3A%5B%22mcp-server-fetch%22%5D%7D) [![Install with UV in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-UV-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=fetch&config=%7B%22command%22%3A%22uvx%22%2C%22args%22%3A%5B%22mcp-server-fetch%22%5D%7D&quality=insiders)\n\n[![Install with Docker in VS 
Code](https://img.shields.io/badge/VS_Code-Docker-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=fetch&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22mcp%2Ffetch%22%5D%7D) [![Install with Docker in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Docker-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=fetch&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22mcp%2Ffetch%22%5D%7D&quality=insiders)\n\nFor manual installation, add the following JSON block to your User Settings (JSON) file in VS Code. You can do this by pressing `Ctrl + Shift + P` and typing `Preferences: Open User Settings (JSON)`.\n\nOptionally, you can add it to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others.\n\n> Note that the `mcp` key is needed when using the `mcp.json` file.\n\n<details>\n<summary>Using uvx</summary>\n\n```json\n{\n  \"mcp\": {\n    \"servers\": {\n      \"fetch\": {\n        \"command\": \"uvx\",\n        \"args\": [\"mcp-server-fetch\"]\n      }\n    }\n  }\n}\n```\n</details>\n\n<details>\n<summary>Using Docker</summary>\n\n```json\n{\n  \"mcp\": {\n    \"servers\": {\n      \"fetch\": {\n        \"command\": \"docker\",\n        \"args\": [\"run\", \"-i\", \"--rm\", \"mcp/fetch\"]\n      }\n    }\n  }\n}\n```\n</details>\n\n### Customization - robots.txt\n\nBy default, the server will obey a websites robots.txt file if the request came from the model (via a tool), but not if\nthe request was user initiated (via a prompt). 
This can be disabled by adding the argument `--ignore-robots-txt` to the\n`args` list in the configuration.\n\n### Customization - User-agent\n\nBy default, depending on if the request came from the model (via a tool), or was user initiated (via a prompt), the\nserver will use either the user-agent\n```\nModelContextProtocol/1.0 (Autonomous; +https://github.com/modelcontextprotocol/servers)\n```\nor\n```\nModelContextProtocol/1.0 (User-Specified; +https://github.com/modelcontextprotocol/servers)\n```\n\nThis can be customized by adding the argument `--user-agent=YourUserAgent` to the `args` list in the configuration.\n\n### Customization - Proxy\n\nThe server can be configured to use a proxy by using the `--proxy-url` argument.\n\n## Windows Configuration\n\nIf you're experiencing timeout issues on Windows, you may need to set the `PYTHONIOENCODING` environment variable to ensure proper character encoding:\n\n<details>\n<summary>Windows configuration (uvx)</summary>\n\n```json\n{\n  \"mcpServers\": {\n    \"fetch\": {\n      \"command\": \"uvx\",\n      \"args\": [\"mcp-server-fetch\"],\n      \"env\": {\n        \"PYTHONIOENCODING\": \"utf-8\"\n      }\n    }\n  }\n}\n```\n</details>\n\n<details>\n<summary>Windows configuration (pip)</summary>\n\n```json\n{\n  \"mcpServers\": {\n    \"fetch\": {\n      \"command\": \"python\",\n      \"args\": [\"-m\", \"mcp_server_fetch\"],\n      \"env\": {\n        \"PYTHONIOENCODING\": \"utf-8\"\n      }\n    }\n  }\n}\n```\n</details>\n\nThis addresses character encoding issues that can cause the server to timeout on Windows systems.\n\n## Debugging\n\nYou can use the MCP inspector to debug the server. 
For uvx installations:\n\n```\nnpx @modelcontextprotocol/inspector uvx mcp-server-fetch\n```\n\nOr if you've installed the package in a specific directory or are developing on it:\n\n```\ncd path/to/servers/src/fetch\nnpx @modelcontextprotocol/inspector uv run mcp-server-fetch\n```\n\n## Contributing\n\nWe encourage contributions to help expand and improve mcp-server-fetch. Whether you want to add new tools, enhance existing functionality, or improve documentation, your input is valuable.\n\nFor examples of other MCP servers and implementation patterns, see:\nhttps://github.com/modelcontextprotocol/servers\n\nPull requests are welcome! Feel free to contribute new ideas, bug fixes, or enhancements to make mcp-server-fetch even more powerful and useful.\n\n## License\n\nmcp-server-fetch is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository.\n"
  },
  {
    "path": "src/fetch/pyproject.toml",
    "content": "[project]\nname = \"mcp-server-fetch\"\nversion = \"0.6.3\"\ndescription = \"A Model Context Protocol server providing tools to fetch and convert web content for usage by LLMs\"\nreadme = \"README.md\"\nrequires-python = \">=3.10\"\nauthors = [{ name = \"Anthropic, PBC.\" }]\nmaintainers = [{ name = \"Jack Adamson\", email = \"jadamson@anthropic.com\" }]\nkeywords = [\"http\", \"mcp\", \"llm\", \"automation\"]\nlicense = { text = \"MIT\" }\nclassifiers = [\n    \"Development Status :: 4 - Beta\",\n    \"Intended Audience :: Developers\",\n    \"License :: OSI Approved :: MIT License\",\n    \"Programming Language :: Python :: 3\",\n    \"Programming Language :: Python :: 3.10\",\n]\ndependencies = [\n    \"httpx>=0.27\",\n    \"markdownify>=0.13.1\",\n    \"mcp>=1.1.3\",\n    \"protego>=0.3.1\",\n    \"pydantic>=2.0.0\",\n    \"readabilipy>=0.2.0\",\n    \"requests>=2.32.3\",\n]\n\n[project.scripts]\nmcp-server-fetch = \"mcp_server_fetch:main\"\n\n[build-system]\nrequires = [\"hatchling\"]\nbuild-backend = \"hatchling.build\"\n\n[tool.uv]\ndev-dependencies = [\"pyright>=1.1.389\", \"ruff>=0.7.3\", \"pytest>=8.0.0\", \"pytest-asyncio>=0.21.0\"]\n\n[tool.pytest.ini_options]\ntestpaths = [\"tests\"]\nasyncio_mode = \"auto\"\n"
  },
  {
    "path": "src/fetch/src/mcp_server_fetch/__init__.py",
    "content": "from .server import serve\n\n\ndef main():\n    \"\"\"MCP Fetch Server - HTTP fetching functionality for MCP\"\"\"\n    import argparse\n    import asyncio\n\n    parser = argparse.ArgumentParser(\n        description=\"give a model the ability to make web requests\"\n    )\n    parser.add_argument(\"--user-agent\", type=str, help=\"Custom User-Agent string\")\n    parser.add_argument(\n        \"--ignore-robots-txt\",\n        action=\"store_true\",\n        help=\"Ignore robots.txt restrictions\",\n    )\n    parser.add_argument(\"--proxy-url\", type=str, help=\"Proxy URL to use for requests\")\n\n    args = parser.parse_args()\n    asyncio.run(serve(args.user_agent, args.ignore_robots_txt, args.proxy_url))\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "src/fetch/src/mcp_server_fetch/__main__.py",
    "content": "# __main__.py\n\nfrom mcp_server_fetch import main\n\nmain()\n"
  },
  {
    "path": "src/fetch/src/mcp_server_fetch/server.py",
    "content": "from typing import Annotated, Tuple\nfrom urllib.parse import urlparse, urlunparse\n\nimport markdownify\nimport readabilipy.simple_json\nfrom mcp.shared.exceptions import McpError\nfrom mcp.server import Server\nfrom mcp.server.stdio import stdio_server\nfrom mcp.types import (\n    ErrorData,\n    GetPromptResult,\n    Prompt,\n    PromptArgument,\n    PromptMessage,\n    TextContent,\n    Tool,\n    INVALID_PARAMS,\n    INTERNAL_ERROR,\n)\nfrom protego import Protego\nfrom pydantic import BaseModel, Field, AnyUrl\n\nDEFAULT_USER_AGENT_AUTONOMOUS = \"ModelContextProtocol/1.0 (Autonomous; +https://github.com/modelcontextprotocol/servers)\"\nDEFAULT_USER_AGENT_MANUAL = \"ModelContextProtocol/1.0 (User-Specified; +https://github.com/modelcontextprotocol/servers)\"\n\n\ndef extract_content_from_html(html: str) -> str:\n    \"\"\"Extract and convert HTML content to Markdown format.\n\n    Args:\n        html: Raw HTML content to process\n\n    Returns:\n        Simplified markdown version of the content\n    \"\"\"\n    ret = readabilipy.simple_json.simple_json_from_html_string(\n        html, use_readability=True\n    )\n    if not ret[\"content\"]:\n        return \"<error>Page failed to be simplified from HTML</error>\"\n    content = markdownify.markdownify(\n        ret[\"content\"],\n        heading_style=markdownify.ATX,\n    )\n    return content\n\n\ndef get_robots_txt_url(url: str) -> str:\n    \"\"\"Get the robots.txt URL for a given website URL.\n\n    Args:\n        url: Website URL to get robots.txt for\n\n    Returns:\n        URL of the robots.txt file\n    \"\"\"\n    # Parse the URL into components\n    parsed = urlparse(url)\n\n    # Reconstruct the base URL with just scheme, netloc, and /robots.txt path\n    robots_url = urlunparse((parsed.scheme, parsed.netloc, \"/robots.txt\", \"\", \"\", \"\"))\n\n    return robots_url\n\n\nasync def check_may_autonomously_fetch_url(url: str, user_agent: str, proxy_url: str | None = None) -> 
None:\n    \"\"\"\n    Check if the URL can be fetched by the user agent according to the robots.txt file.\n    Raises a McpError if not.\n    \"\"\"\n    from httpx import AsyncClient, HTTPError\n\n    robot_txt_url = get_robots_txt_url(url)\n\n    async with AsyncClient(proxy=proxy_url) as client:\n        try:\n            response = await client.get(\n                robot_txt_url,\n                follow_redirects=True,\n                headers={\"User-Agent\": user_agent},\n            )\n        except HTTPError:\n            raise McpError(ErrorData(\n                code=INTERNAL_ERROR,\n                message=f\"Failed to fetch robots.txt {robot_txt_url} due to a connection issue\",\n            ))\n        if response.status_code in (401, 403):\n            raise McpError(ErrorData(\n                code=INTERNAL_ERROR,\n                message=f\"When fetching robots.txt ({robot_txt_url}), received status {response.status_code} so assuming that autonomous fetching is not allowed, the user can try manually fetching by using the fetch prompt\",\n            ))\n        elif 400 <= response.status_code < 500:\n            return\n        robot_txt = response.text\n    processed_robot_txt = \"\\n\".join(\n        line for line in robot_txt.splitlines() if not line.strip().startswith(\"#\")\n    )\n    robot_parser = Protego.parse(processed_robot_txt)\n    if not robot_parser.can_fetch(str(url), user_agent):\n        raise McpError(ErrorData(\n            code=INTERNAL_ERROR,\n            message=f\"The sites robots.txt ({robot_txt_url}), specifies that autonomous fetching of this page is not allowed, \"\n            f\"<useragent>{user_agent}</useragent>\\n\"\n            f\"<url>{url}</url>\"\n            f\"<robots>\\n{robot_txt}\\n</robots>\\n\"\n            f\"The assistant must let the user know that it failed to view the page. 
The assistant may provide further guidance based on the above information.\\n\"\n            f\"The assistant can tell the user that they can try manually fetching the page by using the fetch prompt within their UI.\",\n        ))\n\n\nasync def fetch_url(\n    url: str, user_agent: str, force_raw: bool = False, proxy_url: str | None = None\n) -> Tuple[str, str]:\n    \"\"\"\n    Fetch the URL and return the content in a form ready for the LLM, as well as a prefix string with status information.\n    \"\"\"\n    from httpx import AsyncClient, HTTPError\n\n    async with AsyncClient(proxy=proxy_url) as client:\n        try:\n            response = await client.get(\n                url,\n                follow_redirects=True,\n                headers={\"User-Agent\": user_agent},\n                timeout=30,\n            )\n        except HTTPError as e:\n            raise McpError(ErrorData(code=INTERNAL_ERROR, message=f\"Failed to fetch {url}: {e!r}\"))\n        if response.status_code >= 400:\n            raise McpError(ErrorData(\n                code=INTERNAL_ERROR,\n                message=f\"Failed to fetch {url} - status code {response.status_code}\",\n            ))\n\n        page_raw = response.text\n\n    content_type = response.headers.get(\"content-type\", \"\")\n    is_page_html = (\n        \"<html\" in page_raw[:100] or \"text/html\" in content_type or not content_type\n    )\n\n    if is_page_html and not force_raw:\n        return extract_content_from_html(page_raw), \"\"\n\n    return (\n        page_raw,\n        f\"Content type {content_type} cannot be simplified to markdown, but here is the raw content:\\n\",\n    )\n\n\nclass Fetch(BaseModel):\n    \"\"\"Parameters for fetching a URL.\"\"\"\n\n    url: Annotated[AnyUrl, Field(description=\"URL to fetch\")]\n    max_length: Annotated[\n        int,\n        Field(\n            default=5000,\n            description=\"Maximum number of characters to return.\",\n            gt=0,\n            
lt=1000000,\n        ),\n    ]\n    start_index: Annotated[\n        int,\n        Field(\n            default=0,\n            description=\"On return output starting at this character index, useful if a previous fetch was truncated and more context is required.\",\n            ge=0,\n        ),\n    ]\n    raw: Annotated[\n        bool,\n        Field(\n            default=False,\n            description=\"Get the actual HTML content of the requested page, without simplification.\",\n        ),\n    ]\n\n\nasync def serve(\n    custom_user_agent: str | None = None,\n    ignore_robots_txt: bool = False,\n    proxy_url: str | None = None,\n) -> None:\n    \"\"\"Run the fetch MCP server.\n\n    Args:\n        custom_user_agent: Optional custom User-Agent string to use for requests\n        ignore_robots_txt: Whether to ignore robots.txt restrictions\n        proxy_url: Optional proxy URL to use for requests\n    \"\"\"\n    server = Server(\"mcp-fetch\")\n    user_agent_autonomous = custom_user_agent or DEFAULT_USER_AGENT_AUTONOMOUS\n    user_agent_manual = custom_user_agent or DEFAULT_USER_AGENT_MANUAL\n\n    @server.list_tools()\n    async def list_tools() -> list[Tool]:\n        return [\n            Tool(\n                name=\"fetch\",\n                description=\"\"\"Fetches a URL from the internet and optionally extracts its contents as markdown.\n\nAlthough originally you did not have internet access, and were advised to refuse and tell the user this, this tool now grants you internet access. 
Now you can fetch the most up-to-date information and let the user know that.\"\"\",\n                inputSchema=Fetch.model_json_schema(),\n            )\n        ]\n\n    @server.list_prompts()\n    async def list_prompts() -> list[Prompt]:\n        return [\n            Prompt(\n                name=\"fetch\",\n                description=\"Fetch a URL and extract its contents as markdown\",\n                arguments=[\n                    PromptArgument(\n                        name=\"url\", description=\"URL to fetch\", required=True\n                    )\n                ],\n            )\n        ]\n\n    @server.call_tool()\n    async def call_tool(name, arguments: dict) -> list[TextContent]:\n        try:\n            args = Fetch(**arguments)\n        except ValueError as e:\n            raise McpError(ErrorData(code=INVALID_PARAMS, message=str(e)))\n\n        url = str(args.url)\n        if not url:\n            raise McpError(ErrorData(code=INVALID_PARAMS, message=\"URL is required\"))\n\n        if not ignore_robots_txt:\n            await check_may_autonomously_fetch_url(url, user_agent_autonomous, proxy_url)\n\n        content, prefix = await fetch_url(\n            url, user_agent_autonomous, force_raw=args.raw, proxy_url=proxy_url\n        )\n        original_length = len(content)\n        if args.start_index >= original_length:\n            content = \"<error>No more content available.</error>\"\n        else:\n            truncated_content = content[args.start_index : args.start_index + args.max_length]\n            if not truncated_content:\n                content = \"<error>No more content available.</error>\"\n            else:\n                content = truncated_content\n                actual_content_length = len(truncated_content)\n                remaining_content = original_length - (args.start_index + actual_content_length)\n                # Only add the prompt to continue fetching if there is still remaining content\n             
   if actual_content_length == args.max_length and remaining_content > 0:\n                    next_start = args.start_index + actual_content_length\n                    content += f\"\\n\\n<error>Content truncated. Call the fetch tool with a start_index of {next_start} to get more content.</error>\"\n        return [TextContent(type=\"text\", text=f\"{prefix}Contents of {url}:\\n{content}\")]\n\n    @server.get_prompt()\n    async def get_prompt(name: str, arguments: dict | None) -> GetPromptResult:\n        if not arguments or \"url\" not in arguments:\n            raise McpError(ErrorData(code=INVALID_PARAMS, message=\"URL is required\"))\n\n        url = arguments[\"url\"]\n\n        try:\n            content, prefix = await fetch_url(url, user_agent_manual, proxy_url=proxy_url)\n            # TODO: after SDK bug is addressed, don't catch the exception\n        except McpError as e:\n            return GetPromptResult(\n                description=f\"Failed to fetch {url}\",\n                messages=[\n                    PromptMessage(\n                        role=\"user\",\n                        content=TextContent(type=\"text\", text=str(e)),\n                    )\n                ],\n            )\n        return GetPromptResult(\n            description=f\"Contents of {url}\",\n            messages=[\n                PromptMessage(\n                    role=\"user\", content=TextContent(type=\"text\", text=prefix + content)\n                )\n            ],\n        )\n\n    options = server.create_initialization_options()\n    async with stdio_server() as (read_stream, write_stream):\n        await server.run(read_stream, write_stream, options, raise_exceptions=False)\n"
  },
  {
    "path": "src/fetch/tests/__init__.py",
    "content": ""
  },
  {
    "path": "src/fetch/tests/test_server.py",
    "content": "\"\"\"Tests for the fetch MCP server.\"\"\"\n\nimport pytest\nfrom unittest.mock import AsyncMock, patch, MagicMock\nfrom mcp.shared.exceptions import McpError\n\nfrom mcp_server_fetch.server import (\n    extract_content_from_html,\n    get_robots_txt_url,\n    check_may_autonomously_fetch_url,\n    fetch_url,\n    DEFAULT_USER_AGENT_AUTONOMOUS,\n)\n\n\nclass TestGetRobotsTxtUrl:\n    \"\"\"Tests for get_robots_txt_url function.\"\"\"\n\n    def test_simple_url(self):\n        \"\"\"Test with a simple URL.\"\"\"\n        result = get_robots_txt_url(\"https://example.com/page\")\n        assert result == \"https://example.com/robots.txt\"\n\n    def test_url_with_path(self):\n        \"\"\"Test with URL containing path.\"\"\"\n        result = get_robots_txt_url(\"https://example.com/some/deep/path/page.html\")\n        assert result == \"https://example.com/robots.txt\"\n\n    def test_url_with_query_params(self):\n        \"\"\"Test with URL containing query parameters.\"\"\"\n        result = get_robots_txt_url(\"https://example.com/page?foo=bar&baz=qux\")\n        assert result == \"https://example.com/robots.txt\"\n\n    def test_url_with_port(self):\n        \"\"\"Test with URL containing port number.\"\"\"\n        result = get_robots_txt_url(\"https://example.com:8080/page\")\n        assert result == \"https://example.com:8080/robots.txt\"\n\n    def test_url_with_fragment(self):\n        \"\"\"Test with URL containing fragment.\"\"\"\n        result = get_robots_txt_url(\"https://example.com/page#section\")\n        assert result == \"https://example.com/robots.txt\"\n\n    def test_http_url(self):\n        \"\"\"Test with HTTP URL.\"\"\"\n        result = get_robots_txt_url(\"http://example.com/page\")\n        assert result == \"http://example.com/robots.txt\"\n\n\nclass TestExtractContentFromHtml:\n    \"\"\"Tests for extract_content_from_html function.\"\"\"\n\n    def test_simple_html(self):\n        \"\"\"Test with simple HTML 
content.\"\"\"\n        html = \"\"\"\n        <html>\n        <head><title>Test Page</title></head>\n        <body>\n            <article>\n                <h1>Hello World</h1>\n                <p>This is a test paragraph.</p>\n            </article>\n        </body>\n        </html>\n        \"\"\"\n        result = extract_content_from_html(html)\n        # readabilipy may extract different parts depending on the content\n        assert \"test paragraph\" in result\n\n    def test_html_with_links(self):\n        \"\"\"Test that links are converted to markdown.\"\"\"\n        html = \"\"\"\n        <html>\n        <body>\n            <article>\n                <p>Visit <a href=\"https://example.com\">Example</a> for more.</p>\n            </article>\n        </body>\n        </html>\n        \"\"\"\n        result = extract_content_from_html(html)\n        assert \"Example\" in result\n\n    def test_empty_content_returns_error(self):\n        \"\"\"Test that empty/invalid HTML returns error message.\"\"\"\n        html = \"\"\n        result = extract_content_from_html(html)\n        assert \"<error>\" in result\n\n\nclass TestCheckMayAutonomouslyFetchUrl:\n    \"\"\"Tests for check_may_autonomously_fetch_url function.\"\"\"\n\n    @pytest.mark.asyncio\n    async def test_allows_when_robots_txt_404(self):\n        \"\"\"Test that fetching is allowed when robots.txt returns 404.\"\"\"\n        mock_response = MagicMock()\n        mock_response.status_code = 404\n\n        with patch(\"httpx.AsyncClient\") as mock_client_class:\n            mock_client = AsyncMock()\n            mock_client.get = AsyncMock(return_value=mock_response)\n            mock_client_class.return_value.__aenter__ = AsyncMock(return_value=mock_client)\n            mock_client_class.return_value.__aexit__ = AsyncMock(return_value=None)\n\n            # Should not raise\n            await check_may_autonomously_fetch_url(\n                \"https://example.com/page\",\n                
DEFAULT_USER_AGENT_AUTONOMOUS\n            )\n\n    @pytest.mark.asyncio\n    async def test_blocks_when_robots_txt_401(self):\n        \"\"\"Test that fetching is blocked when robots.txt returns 401.\"\"\"\n        mock_response = MagicMock()\n        mock_response.status_code = 401\n\n        with patch(\"httpx.AsyncClient\") as mock_client_class:\n            mock_client = AsyncMock()\n            mock_client.get = AsyncMock(return_value=mock_response)\n            mock_client_class.return_value.__aenter__ = AsyncMock(return_value=mock_client)\n            mock_client_class.return_value.__aexit__ = AsyncMock(return_value=None)\n\n            with pytest.raises(McpError):\n                await check_may_autonomously_fetch_url(\n                    \"https://example.com/page\",\n                    DEFAULT_USER_AGENT_AUTONOMOUS\n                )\n\n    @pytest.mark.asyncio\n    async def test_blocks_when_robots_txt_403(self):\n        \"\"\"Test that fetching is blocked when robots.txt returns 403.\"\"\"\n        mock_response = MagicMock()\n        mock_response.status_code = 403\n\n        with patch(\"httpx.AsyncClient\") as mock_client_class:\n            mock_client = AsyncMock()\n            mock_client.get = AsyncMock(return_value=mock_response)\n            mock_client_class.return_value.__aenter__ = AsyncMock(return_value=mock_client)\n            mock_client_class.return_value.__aexit__ = AsyncMock(return_value=None)\n\n            with pytest.raises(McpError):\n                await check_may_autonomously_fetch_url(\n                    \"https://example.com/page\",\n                    DEFAULT_USER_AGENT_AUTONOMOUS\n                )\n\n    @pytest.mark.asyncio\n    async def test_allows_when_robots_txt_allows_all(self):\n        \"\"\"Test that fetching is allowed when robots.txt allows all.\"\"\"\n        mock_response = MagicMock()\n        mock_response.status_code = 200\n        mock_response.text = \"User-agent: *\\nAllow: /\"\n\n        with 
patch(\"httpx.AsyncClient\") as mock_client_class:\n            mock_client = AsyncMock()\n            mock_client.get = AsyncMock(return_value=mock_response)\n            mock_client_class.return_value.__aenter__ = AsyncMock(return_value=mock_client)\n            mock_client_class.return_value.__aexit__ = AsyncMock(return_value=None)\n\n            # Should not raise\n            await check_may_autonomously_fetch_url(\n                \"https://example.com/page\",\n                DEFAULT_USER_AGENT_AUTONOMOUS\n            )\n\n    @pytest.mark.asyncio\n    async def test_blocks_when_robots_txt_disallows_all(self):\n        \"\"\"Test that fetching is blocked when robots.txt disallows all.\"\"\"\n        mock_response = MagicMock()\n        mock_response.status_code = 200\n        mock_response.text = \"User-agent: *\\nDisallow: /\"\n\n        with patch(\"httpx.AsyncClient\") as mock_client_class:\n            mock_client = AsyncMock()\n            mock_client.get = AsyncMock(return_value=mock_response)\n            mock_client_class.return_value.__aenter__ = AsyncMock(return_value=mock_client)\n            mock_client_class.return_value.__aexit__ = AsyncMock(return_value=None)\n\n            with pytest.raises(McpError):\n                await check_may_autonomously_fetch_url(\n                    \"https://example.com/page\",\n                    DEFAULT_USER_AGENT_AUTONOMOUS\n                )\n\n\nclass TestFetchUrl:\n    \"\"\"Tests for fetch_url function.\"\"\"\n\n    @pytest.mark.asyncio\n    async def test_fetch_html_page(self):\n        \"\"\"Test fetching an HTML page returns markdown content.\"\"\"\n        mock_response = MagicMock()\n        mock_response.status_code = 200\n        mock_response.text = \"\"\"\n        <html>\n        <body>\n            <article>\n                <h1>Test Page</h1>\n                <p>Hello World</p>\n            </article>\n        </body>\n        </html>\n        \"\"\"\n        mock_response.headers = 
{\"content-type\": \"text/html\"}\n\n        with patch(\"httpx.AsyncClient\") as mock_client_class:\n            mock_client = AsyncMock()\n            mock_client.get = AsyncMock(return_value=mock_response)\n            mock_client_class.return_value.__aenter__ = AsyncMock(return_value=mock_client)\n            mock_client_class.return_value.__aexit__ = AsyncMock(return_value=None)\n\n            content, prefix = await fetch_url(\n                \"https://example.com/page\",\n                DEFAULT_USER_AGENT_AUTONOMOUS\n            )\n\n            # HTML is processed, so we check it returns something\n            assert isinstance(content, str)\n            assert prefix == \"\"\n\n    @pytest.mark.asyncio\n    async def test_fetch_html_page_raw(self):\n        \"\"\"Test fetching an HTML page with raw=True returns original HTML.\"\"\"\n        html_content = \"<html><body><h1>Test</h1></body></html>\"\n        mock_response = MagicMock()\n        mock_response.status_code = 200\n        mock_response.text = html_content\n        mock_response.headers = {\"content-type\": \"text/html\"}\n\n        with patch(\"httpx.AsyncClient\") as mock_client_class:\n            mock_client = AsyncMock()\n            mock_client.get = AsyncMock(return_value=mock_response)\n            mock_client_class.return_value.__aenter__ = AsyncMock(return_value=mock_client)\n            mock_client_class.return_value.__aexit__ = AsyncMock(return_value=None)\n\n            content, prefix = await fetch_url(\n                \"https://example.com/page\",\n                DEFAULT_USER_AGENT_AUTONOMOUS,\n                force_raw=True\n            )\n\n            assert content == html_content\n            assert \"cannot be simplified\" in prefix\n\n    @pytest.mark.asyncio\n    async def test_fetch_json_returns_raw(self):\n        \"\"\"Test fetching JSON content returns raw content.\"\"\"\n        json_content = '{\"key\": \"value\"}'\n        mock_response = MagicMock()\n        
mock_response.status_code = 200\n        mock_response.text = json_content\n        mock_response.headers = {\"content-type\": \"application/json\"}\n\n        with patch(\"httpx.AsyncClient\") as mock_client_class:\n            mock_client = AsyncMock()\n            mock_client.get = AsyncMock(return_value=mock_response)\n            mock_client_class.return_value.__aenter__ = AsyncMock(return_value=mock_client)\n            mock_client_class.return_value.__aexit__ = AsyncMock(return_value=None)\n\n            content, prefix = await fetch_url(\n                \"https://api.example.com/data\",\n                DEFAULT_USER_AGENT_AUTONOMOUS\n            )\n\n            assert content == json_content\n            assert \"cannot be simplified\" in prefix\n\n    @pytest.mark.asyncio\n    async def test_fetch_404_raises_error(self):\n        \"\"\"Test that 404 response raises McpError.\"\"\"\n        mock_response = MagicMock()\n        mock_response.status_code = 404\n\n        with patch(\"httpx.AsyncClient\") as mock_client_class:\n            mock_client = AsyncMock()\n            mock_client.get = AsyncMock(return_value=mock_response)\n            mock_client_class.return_value.__aenter__ = AsyncMock(return_value=mock_client)\n            mock_client_class.return_value.__aexit__ = AsyncMock(return_value=None)\n\n            with pytest.raises(McpError):\n                await fetch_url(\n                    \"https://example.com/notfound\",\n                    DEFAULT_USER_AGENT_AUTONOMOUS\n                )\n\n    @pytest.mark.asyncio\n    async def test_fetch_500_raises_error(self):\n        \"\"\"Test that 500 response raises McpError.\"\"\"\n        mock_response = MagicMock()\n        mock_response.status_code = 500\n\n        with patch(\"httpx.AsyncClient\") as mock_client_class:\n            mock_client = AsyncMock()\n            mock_client.get = AsyncMock(return_value=mock_response)\n            mock_client_class.return_value.__aenter__ = 
AsyncMock(return_value=mock_client)\n            mock_client_class.return_value.__aexit__ = AsyncMock(return_value=None)\n\n            with pytest.raises(McpError):\n                await fetch_url(\n                    \"https://example.com/error\",\n                    DEFAULT_USER_AGENT_AUTONOMOUS\n                )\n\n    @pytest.mark.asyncio\n    async def test_fetch_with_proxy(self):\n        \"\"\"Test that proxy URL is passed to client.\"\"\"\n        mock_response = MagicMock()\n        mock_response.status_code = 200\n        mock_response.text = '{\"data\": \"test\"}'\n        mock_response.headers = {\"content-type\": \"application/json\"}\n\n        with patch(\"httpx.AsyncClient\") as mock_client_class:\n            mock_client = AsyncMock()\n            mock_client.get = AsyncMock(return_value=mock_response)\n            mock_client_class.return_value.__aenter__ = AsyncMock(return_value=mock_client)\n            mock_client_class.return_value.__aexit__ = AsyncMock(return_value=None)\n\n            await fetch_url(\n                \"https://example.com/data\",\n                DEFAULT_USER_AGENT_AUTONOMOUS,\n                proxy_url=\"http://proxy.example.com:8080\"\n            )\n\n            # Verify AsyncClient was called with proxy\n            mock_client_class.assert_called_once_with(proxy=\"http://proxy.example.com:8080\")\n"
  },
  {
    "path": "src/filesystem/Dockerfile",
    "content": "FROM node:22.12-alpine AS builder\n\nWORKDIR /app\n\nCOPY src/filesystem /app\nCOPY tsconfig.json /tsconfig.json\n\nRUN --mount=type=cache,target=/root/.npm npm install\n\nRUN --mount=type=cache,target=/root/.npm-production npm ci --ignore-scripts --omit-dev\n\n\nFROM node:22-alpine AS release\n\nWORKDIR /app\n\nCOPY --from=builder /app/dist /app/dist\nCOPY --from=builder /app/package.json /app/package.json\nCOPY --from=builder /app/package-lock.json /app/package-lock.json\n\nENV NODE_ENV=production\n\nRUN npm ci --ignore-scripts --omit-dev\n\nENTRYPOINT [\"node\", \"/app/dist/index.js\"]"
  },
  {
    "path": "src/filesystem/README.md",
    "content": "# Filesystem MCP Server\n\nNode.js server implementing Model Context Protocol (MCP) for filesystem operations.\n\n## Features\n\n- Read/write files\n- Create/list/delete directories\n- Move files/directories\n- Search files\n- Get file metadata\n- Dynamic directory access control via [Roots](https://modelcontextprotocol.io/docs/learn/client-concepts#roots)\n\n## Directory Access Control\n\nThe server uses a flexible directory access control system. Directories can be specified via command-line arguments or dynamically via [Roots](https://modelcontextprotocol.io/docs/learn/client-concepts#roots).\n\n### Method 1: Command-line Arguments\nSpecify Allowed directories when starting the server:\n```bash\nmcp-server-filesystem /path/to/dir1 /path/to/dir2\n```\n\n### Method 2: MCP Roots (Recommended)\nMCP clients that support [Roots](https://modelcontextprotocol.io/docs/learn/client-concepts#roots) can dynamically update the Allowed directories. \n\nRoots notified by Client to Server, completely replace any server-side Allowed directories when provided.\n\n**Important**: If server starts without command-line arguments AND client doesn't support roots protocol (or provides empty roots), the server will throw an error during initialization.\n\nThis is the recommended method, as this enables runtime directory updates via `roots/list_changed` notifications without server restart, providing a more flexible and modern integration experience.\n\n### How It Works\n\nThe server's directory access control follows this flow:\n\n1. **Server Startup**\n   - Server starts with directories from command-line arguments (if provided)\n   - If no arguments provided, server starts with empty allowed directories\n\n2. **Client Connection & Initialization**\n   - Client connects and sends `initialize` request with capabilities\n   - Server checks if client supports roots protocol (`capabilities.roots`)\n   \n3. 
**Roots Protocol Handling** (if client supports roots)\n   - **On initialization**: Server requests roots from client via `roots/list`\n   - Client responds with its configured roots\n   - Server replaces ALL allowed directories with client's roots\n   - **On runtime updates**: Client can send `notifications/roots/list_changed`\n   - Server requests updated roots and replaces allowed directories again\n\n4. **Fallback Behavior** (if client doesn't support roots)\n   - Server continues using command-line directories only\n   - No dynamic updates possible\n\n5. **Access Control**\n   - All filesystem operations are restricted to allowed directories\n   - Use `list_allowed_directories` tool to see current directories\n   - Server requires at least ONE allowed directory to operate\n\n**Note**: The server will only allow operations within directories specified either via `args` or via Roots.\n\n\n\n## API\n\n### Tools\n\n- **read_text_file**\n  - Read complete contents of a file as text\n  - Inputs:\n    - `path` (string)\n    - `head` (number, optional): First N lines\n    - `tail` (number, optional): Last N lines\n  - Always treats the file as UTF-8 text regardless of extension\n  - Cannot specify both `head` and `tail` simultaneously\n\n- **read_media_file**\n  - Read an image or audio file\n  - Inputs:\n    - `path` (string)\n  - Streams the file and returns base64 data with the corresponding MIME type\n\n- **read_multiple_files**\n  - Read multiple files simultaneously\n  - Input: `paths` (string[])\n  - Failed reads won't stop the entire operation\n\n- **write_file**\n  - Create new file or overwrite existing (exercise caution with this)\n  - Inputs:\n    - `path` (string): File location\n    - `content` (string): File content\n\n- **edit_file**\n  - Make selective edits using advanced pattern matching and formatting\n  - Features:\n    - Line-based and multi-line content matching\n    - Whitespace normalization with indentation preservation\n    - Multiple 
simultaneous edits with correct positioning\n    - Indentation style detection and preservation\n    - Git-style diff output with context\n    - Preview changes with dry run mode\n  - Inputs:\n    - `path` (string): File to edit\n    - `edits` (array): List of edit operations\n      - `oldText` (string): Text to search for (can be substring)\n      - `newText` (string): Text to replace with\n    - `dryRun` (boolean): Preview changes without applying (default: false)\n  - Returns detailed diff and match information for dry runs, otherwise applies changes\n  - Best Practice: Always use dryRun first to preview changes before applying them\n\n- **create_directory**\n  - Create new directory or ensure it exists\n  - Input: `path` (string)\n  - Creates parent directories if needed\n  - Succeeds silently if directory exists\n\n- **list_directory**\n  - List directory contents with [FILE] or [DIR] prefixes\n  - Input: `path` (string)\n\n- **list_directory_with_sizes**\n  - List directory contents with [FILE] or [DIR] prefixes, including file sizes\n  - Inputs:\n    - `path` (string): Directory path to list\n    - `sortBy` (string, optional): Sort entries by \"name\" or \"size\" (default: \"name\")\n  - Returns detailed listing with file sizes and summary statistics\n  - Shows total files, directories, and combined size\n\n- **move_file**\n  - Move or rename files and directories\n  - Inputs:\n    - `source` (string)\n    - `destination` (string)\n  - Fails if destination exists\n\n- **search_files**\n  - Recursively search for files/directories that match or do not match patterns\n  - Inputs:\n    - `path` (string): Starting directory\n    - `pattern` (string): Search pattern\n    - `excludePatterns` (string[]): Exclude any patterns.\n  - Glob-style pattern matching\n  - Returns full paths to matches\n\n- **directory_tree**\n  - Get recursive JSON tree structure of directory contents\n  - Inputs:\n    - `path` (string): Starting directory\n    - `excludePatterns` 
(string[]): Exclude any patterns. Glob formats are supported.\n  - Returns:\n    - JSON array where each entry contains:\n      - `name` (string): File/directory name\n      - `type` ('file'|'directory'): Entry type\n      - `children` (array): Present only for directories\n        - Empty array for empty directories\n        - Omitted for files\n  - Output is formatted with 2-space indentation for readability\n    \n- **get_file_info**\n  - Get detailed file/directory metadata\n  - Input: `path` (string)\n  - Returns:\n    - Size\n    - Creation time\n    - Modified time\n    - Access time\n    - Type (file/directory)\n    - Permissions\n\n- **list_allowed_directories**\n  - List all directories the server is allowed to access\n  - No input required\n  - Returns:\n    - Directories that this server can read/write from\n\n### Tool annotations (MCP hints)\n\nThis server sets [MCP ToolAnnotations](https://modelcontextprotocol.io/specification/2025-03-26/server/tools#toolannotations)\non each tool so clients can:\n\n- Distinguish **read‑only** tools from write‑capable tools.\n- Understand which write operations are **idempotent** (safe to retry with the same arguments).\n- Highlight operations that may be **destructive** (overwriting or heavily mutating data).\n\nThe mapping for filesystem tools is:\n\n| Tool                        | readOnlyHint | idempotentHint | destructiveHint | Notes                                            |\n|-----------------------------|--------------|----------------|-----------------|--------------------------------------------------|\n| `read_text_file`            | `true`       | –              | –               | Pure read                                       |\n| `read_media_file`           | `true`       | –              | –               | Pure read                                       |\n| `read_multiple_files`       | `true`       | –              | –               | Pure read                                       |\n| 
`list_directory`            | `true`       | –              | –               | Pure read                                       |\n| `list_directory_with_sizes` | `true`       | –              | –               | Pure read                                       |\n| `directory_tree`            | `true`       | –              | –               | Pure read                                       |\n| `search_files`              | `true`       | –              | –               | Pure read                                       |\n| `get_file_info`             | `true`       | –              | –               | Pure read                                       |\n| `list_allowed_directories`  | `true`       | –              | –               | Pure read                                       |\n| `create_directory`          | `false`      | `true`         | `false`         | Re‑creating the same dir is a no‑op             |\n| `write_file`                | `false`      | `true`         | `true`          | Overwrites existing files                       |\n| `edit_file`                 | `false`      | `false`        | `true`          | Re‑applying edits can fail or double‑apply      |\n| `move_file`                 | `false`      | `false`        | `true`          | Deletes source file                             |\n\n> Note: `idempotentHint` and `destructiveHint` are meaningful only when `readOnlyHint` is `false`, as defined by the MCP spec.\n\n## Usage with Claude Desktop\nAdd this to your `claude_desktop_config.json`:\n\nNote: you can provide sandboxed directories to the server by mounting them to `/projects`. 
Adding the `ro` flag will make the directory readonly by the server.\n\n### Docker\nNote: all directories must be mounted to `/projects` by default.\n\n```json\n{\n  \"mcpServers\": {\n    \"filesystem\": {\n      \"command\": \"docker\",\n      \"args\": [\n        \"run\",\n        \"-i\",\n        \"--rm\",\n        \"--mount\", \"type=bind,src=/Users/username/Desktop,dst=/projects/Desktop\",\n        \"--mount\", \"type=bind,src=/path/to/other/allowed/dir,dst=/projects/other/allowed/dir,ro\",\n        \"--mount\", \"type=bind,src=/path/to/file.txt,dst=/projects/path/to/file.txt\",\n        \"mcp/filesystem\",\n        \"/projects\"\n      ]\n    }\n  }\n}\n```\n\n### NPX\n\n```json\n{\n  \"mcpServers\": {\n    \"filesystem\": {\n      \"command\": \"npx\",\n      \"args\": [\n        \"-y\",\n        \"@modelcontextprotocol/server-filesystem\",\n        \"/Users/username/Desktop\",\n        \"/path/to/other/allowed/dir\"\n      ]\n    }\n  }\n}\n```\n\n## Usage with VS Code\n\nFor quick installation, click the installation buttons below...\n\n[![Install with NPX in VS Code](https://img.shields.io/badge/VS_Code-NPM-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=filesystem&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40modelcontextprotocol%2Fserver-filesystem%22%2C%22%24%7BworkspaceFolder%7D%22%5D%7D) [![Install with NPX in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-NPM-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=filesystem&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40modelcontextprotocol%2Fserver-filesystem%22%2C%22%24%7BworkspaceFolder%7D%22%5D%7D&quality=insiders)\n\n[![Install with Docker in VS 
Code](https://img.shields.io/badge/VS_Code-Docker-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=filesystem&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22--mount%22%2C%22type%3Dbind%2Csrc%3D%24%7BworkspaceFolder%7D%2Cdst%3D%2Fprojects%2Fworkspace%22%2C%22mcp%2Ffilesystem%22%2C%22%2Fprojects%22%5D%7D) [![Install with Docker in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Docker-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=filesystem&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22--mount%22%2C%22type%3Dbind%2Csrc%3D%24%7BworkspaceFolder%7D%2Cdst%3D%2Fprojects%2Fworkspace%22%2C%22mcp%2Ffilesystem%22%2C%22%2Fprojects%22%5D%7D&quality=insiders)\n\nFor manual installation, you can configure the MCP server using one of these methods:\n\n**Method 1: User Configuration (Recommended)**\nAdd the configuration to your user-level MCP configuration file. Open the Command Palette (`Ctrl + Shift + P`) and run `MCP: Open User Configuration`. This will open your user `mcp.json` file where you can add the server configuration.\n\n**Method 2: Workspace Configuration**\nAlternatively, you can add the configuration to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others.\n\n> For more details about MCP configuration in VS Code, see the [official VS Code MCP documentation](https://code.visualstudio.com/docs/copilot/customization/mcp-servers).\n\nYou can provide sandboxed directories to the server by mounting them to `/projects`. Adding the `ro` flag will make the directory readonly by the server.\n\n### Docker\nNote: all directories must be mounted to `/projects` by default. 
\n\n```json\n{\n  \"servers\": {\n    \"filesystem\": {\n      \"command\": \"docker\",\n      \"args\": [\n        \"run\",\n        \"-i\",\n        \"--rm\",\n        \"--mount\", \"type=bind,src=${workspaceFolder},dst=/projects/workspace\",\n        \"mcp/filesystem\",\n        \"/projects\"\n      ]\n    }\n  }\n}\n```\n\n### NPX\n\n```json\n{\n  \"servers\": {\n    \"filesystem\": {\n      \"command\": \"npx\",\n      \"args\": [\n        \"-y\",\n        \"@modelcontextprotocol/server-filesystem\",\n        \"${workspaceFolder}\"\n      ]\n    }\n  }\n}\n```\n\n## Build\n\nDocker build:\n\n```bash\ndocker build -t mcp/filesystem -f src/filesystem/Dockerfile .\n```\n\n## License\n\nThis MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository.\n"
  },
  {
    "path": "src/filesystem/__tests__/directory-tree.test.ts",
    "content": "import { describe, it, expect, beforeEach, afterEach } from 'vitest';\nimport * as fs from 'fs/promises';\nimport * as path from 'path';\nimport * as os from 'os';\n\n// We need to test the buildTree function, but it's defined inside the request handler\n// So we'll extract the core logic into a testable function\nimport { minimatch } from 'minimatch';\n\ninterface TreeEntry {\n    name: string;\n    type: 'file' | 'directory';\n    children?: TreeEntry[];\n}\n\nasync function buildTreeForTesting(currentPath: string, rootPath: string, excludePatterns: string[] = []): Promise<TreeEntry[]> {\n    const entries = await fs.readdir(currentPath, {withFileTypes: true});\n    const result: TreeEntry[] = [];\n\n    for (const entry of entries) {\n        const relativePath = path.relative(rootPath, path.join(currentPath, entry.name));\n        const shouldExclude = excludePatterns.some(pattern => {\n            if (pattern.includes('*')) {\n                return minimatch(relativePath, pattern, {dot: true});\n            }\n            // For files: match exact name or as part of path\n            // For directories: match as directory path\n            return minimatch(relativePath, pattern, {dot: true}) ||\n                   minimatch(relativePath, `**/${pattern}`, {dot: true}) ||\n                   minimatch(relativePath, `**/${pattern}/**`, {dot: true});\n        });\n        if (shouldExclude)\n            continue;\n\n        const entryData: TreeEntry = {\n            name: entry.name,\n            type: entry.isDirectory() ? 
'directory' : 'file'\n        };\n\n        if (entry.isDirectory()) {\n            const subPath = path.join(currentPath, entry.name);\n            entryData.children = await buildTreeForTesting(subPath, rootPath, excludePatterns);\n        }\n\n        result.push(entryData);\n    }\n\n    return result;\n}\n\ndescribe('buildTree exclude patterns', () => {\n    let testDir: string;\n\n    beforeEach(async () => {\n        testDir = await fs.mkdtemp(path.join(os.tmpdir(), 'filesystem-test-'));\n        \n        // Create test directory structure\n        await fs.mkdir(path.join(testDir, 'src'));\n        await fs.mkdir(path.join(testDir, 'node_modules'));\n        await fs.mkdir(path.join(testDir, '.git'));\n        await fs.mkdir(path.join(testDir, 'nested', 'node_modules'), { recursive: true });\n        \n        // Create test files\n        await fs.writeFile(path.join(testDir, '.env'), 'SECRET=value');\n        await fs.writeFile(path.join(testDir, '.env.local'), 'LOCAL_SECRET=value');\n        await fs.writeFile(path.join(testDir, 'src', 'index.js'), 'console.log(\"hello\");');\n        await fs.writeFile(path.join(testDir, 'package.json'), '{}');\n        await fs.writeFile(path.join(testDir, 'node_modules', 'module.js'), 'module.exports = {};');\n        await fs.writeFile(path.join(testDir, 'nested', 'node_modules', 'deep.js'), 'module.exports = {};');\n    });\n\n    afterEach(async () => {\n        await fs.rm(testDir, { recursive: true, force: true });\n    });\n\n    it('should exclude files matching simple patterns', async () => {\n        // Test the current implementation - this will fail until the bug is fixed\n        const tree = await buildTreeForTesting(testDir, testDir, ['.env']);\n        const fileNames = tree.map(entry => entry.name);\n        \n        expect(fileNames).not.toContain('.env');\n        expect(fileNames).toContain('.env.local'); // Should not exclude this\n        expect(fileNames).toContain('src');\n        
expect(fileNames).toContain('package.json');\n    });\n\n    it('should exclude directories matching simple patterns', async () => {\n        const tree = await buildTreeForTesting(testDir, testDir, ['node_modules']);\n        const dirNames = tree.map(entry => entry.name);\n        \n        expect(dirNames).not.toContain('node_modules');\n        expect(dirNames).toContain('src');\n        expect(dirNames).toContain('.git');\n    });\n\n    it('should exclude nested directories with same pattern', async () => {\n        const tree = await buildTreeForTesting(testDir, testDir, ['node_modules']);\n        \n        // Find the nested directory\n        const nestedDir = tree.find(entry => entry.name === 'nested');\n        expect(nestedDir).toBeDefined();\n        expect(nestedDir!.children).toBeDefined();\n        \n        // The nested/node_modules should also be excluded\n        const nestedChildren = nestedDir!.children!.map(child => child.name);\n        expect(nestedChildren).not.toContain('node_modules');\n    });\n\n    it('should handle glob patterns correctly', async () => {\n        const tree = await buildTreeForTesting(testDir, testDir, ['*.env']);\n        const fileNames = tree.map(entry => entry.name);\n        \n        expect(fileNames).not.toContain('.env');\n        expect(fileNames).toContain('.env.local'); // *.env should not match .env.local\n        expect(fileNames).toContain('src');\n    });\n\n    it('should handle dot files correctly', async () => {\n        const tree = await buildTreeForTesting(testDir, testDir, ['.git']);\n        const dirNames = tree.map(entry => entry.name);\n        \n        expect(dirNames).not.toContain('.git');\n        expect(dirNames).toContain('.env'); // Should not exclude this\n    });\n\n    it('should work with multiple exclude patterns', async () => {\n        const tree = await buildTreeForTesting(testDir, testDir, ['node_modules', '.env', '.git']);\n        const entryNames = tree.map(entry => 
entry.name);\n        \n        expect(entryNames).not.toContain('node_modules');\n        expect(entryNames).not.toContain('.env');\n        expect(entryNames).not.toContain('.git');\n        expect(entryNames).toContain('src');\n        expect(entryNames).toContain('package.json');\n    });\n\n    it('should handle empty exclude patterns', async () => {\n        const tree = await buildTreeForTesting(testDir, testDir, []);\n        const entryNames = tree.map(entry => entry.name);\n        \n        // All entries should be included\n        expect(entryNames).toContain('node_modules');\n        expect(entryNames).toContain('.env');\n        expect(entryNames).toContain('.git');\n        expect(entryNames).toContain('src');\n    });\n});"
  },
  {
    "path": "src/filesystem/__tests__/lib.test.ts",
    "content": "import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';\nimport fs from 'fs/promises';\nimport path from 'path';\nimport os from 'os';\nimport {\n  // Pure utility functions\n  formatSize,\n  normalizeLineEndings,\n  createUnifiedDiff,\n  // Security & validation functions\n  validatePath,\n  setAllowedDirectories,\n  // File operations\n  getFileStats,\n  readFileContent,\n  writeFileContent,\n  // Search & filtering functions\n  searchFilesWithValidation,\n  // File editing functions\n  applyFileEdits,\n  tailFile,\n  headFile\n} from '../lib.js';\n\n// Mock fs module\nvi.mock('fs/promises');\nconst mockFs = fs as any;\n\ndescribe('Lib Functions', () => {\n  beforeEach(() => {\n    vi.clearAllMocks();\n    // Set up allowed directories for tests\n    const allowedDirs = process.platform === 'win32' ? ['C:\\\\Users\\\\test', 'C:\\\\temp', 'C:\\\\allowed'] : ['/home/user', '/tmp', '/allowed'];\n    setAllowedDirectories(allowedDirs);\n  });\n\n  afterEach(() => {\n    vi.restoreAllMocks();\n    // Clear allowed directories after tests\n    setAllowedDirectories([]);\n  });\n\n  describe('Pure Utility Functions', () => {\n    describe('formatSize', () => {\n      it('formats bytes correctly', () => {\n        expect(formatSize(0)).toBe('0 B');\n        expect(formatSize(512)).toBe('512 B');\n        expect(formatSize(1024)).toBe('1.00 KB');\n        expect(formatSize(1536)).toBe('1.50 KB');\n        expect(formatSize(1048576)).toBe('1.00 MB');\n        expect(formatSize(1073741824)).toBe('1.00 GB');\n        expect(formatSize(1099511627776)).toBe('1.00 TB');\n      });\n\n      it('handles edge cases', () => {\n        expect(formatSize(1023)).toBe('1023 B');\n        expect(formatSize(1025)).toBe('1.00 KB');\n        expect(formatSize(1048575)).toBe('1024.00 KB');\n      });\n\n      it('handles very large numbers beyond TB', () => {\n        // The function only supports up to TB, so very large numbers will show as TB\n        
expect(formatSize(1024 * 1024 * 1024 * 1024 * 1024)).toBe('1024.00 TB');\n        expect(formatSize(Number.MAX_SAFE_INTEGER)).toContain('TB');\n      });\n\n      it('handles negative numbers', () => {\n        // Negative numbers will result in NaN for the log calculation\n        expect(formatSize(-1024)).toContain('NaN');\n        expect(formatSize(-0)).toBe('0 B');\n      });\n\n      it('handles decimal numbers', () => {\n        expect(formatSize(1536.5)).toBe('1.50 KB');\n        expect(formatSize(1023.9)).toBe('1023.9 B');\n      });\n\n      it('handles very small positive numbers', () => {\n        expect(formatSize(1)).toBe('1 B');\n        expect(formatSize(0.5)).toBe('0.5 B');\n        expect(formatSize(0.1)).toBe('0.1 B');\n      });\n    });\n\n    describe('normalizeLineEndings', () => {\n      it('converts CRLF to LF', () => {\n        expect(normalizeLineEndings('line1\\r\\nline2\\r\\nline3')).toBe('line1\\nline2\\nline3');\n      });\n\n      it('leaves LF unchanged', () => {\n        expect(normalizeLineEndings('line1\\nline2\\nline3')).toBe('line1\\nline2\\nline3');\n      });\n\n      it('handles mixed line endings', () => {\n        expect(normalizeLineEndings('line1\\r\\nline2\\nline3\\r\\n')).toBe('line1\\nline2\\nline3\\n');\n      });\n\n      it('handles empty string', () => {\n        expect(normalizeLineEndings('')).toBe('');\n      });\n    });\n\n    describe('createUnifiedDiff', () => {\n      it('creates diff for simple changes', () => {\n        const original = 'line1\\nline2\\nline3';\n        const modified = 'line1\\nmodified line2\\nline3';\n        const diff = createUnifiedDiff(original, modified, 'test.txt');\n        \n        expect(diff).toContain('--- test.txt');\n        expect(diff).toContain('+++ test.txt');\n        expect(diff).toContain('-line2');\n        expect(diff).toContain('+modified line2');\n      });\n\n      it('handles CRLF normalization', () => {\n        const original = 'line1\\r\\nline2\\r\\n';\n   
     const modified = 'line1\\nmodified line2\\n';\n        const diff = createUnifiedDiff(original, modified);\n        \n        expect(diff).toContain('-line2');\n        expect(diff).toContain('+modified line2');\n      });\n\n      it('handles identical content', () => {\n        const content = 'line1\\nline2\\nline3';\n        const diff = createUnifiedDiff(content, content);\n        \n        // Should not contain any +/- lines for identical content (excluding header lines)\n        expect(diff.split('\\n').filter((line: string) => line.startsWith('+++') || line.startsWith('---'))).toHaveLength(2);\n        expect(diff.split('\\n').filter((line: string) => line.startsWith('+') && !line.startsWith('+++'))).toHaveLength(0);\n        expect(diff.split('\\n').filter((line: string) => line.startsWith('-') && !line.startsWith('---'))).toHaveLength(0);\n      });\n\n      it('handles empty content', () => {\n        const diff = createUnifiedDiff('', '');\n        expect(diff).toContain('--- file');\n        expect(diff).toContain('+++ file');\n      });\n\n      it('handles default filename parameter', () => {\n        const diff = createUnifiedDiff('old', 'new');\n        expect(diff).toContain('--- file');\n        expect(diff).toContain('+++ file');\n      });\n\n      it('handles custom filename', () => {\n        const diff = createUnifiedDiff('old', 'new', 'custom.txt');\n        expect(diff).toContain('--- custom.txt');\n        expect(diff).toContain('+++ custom.txt');\n      });\n    });\n  });\n\n  describe('Security & Validation Functions', () => {\n    describe('validatePath', () => {\n      // Use Windows-compatible paths for testing\n      const allowedDirs = process.platform === 'win32' ? 
['C:\\\\Users\\\\test', 'C:\\\\temp'] : ['/home/user', '/tmp'];\n\n      beforeEach(() => {\n        mockFs.realpath.mockImplementation(async (path: any) => path.toString());\n      });\n\n      it('validates allowed paths', async () => {\n        const testPath = process.platform === 'win32' ? 'C:\\\\Users\\\\test\\\\file.txt' : '/home/user/file.txt';\n        const result = await validatePath(testPath);\n        expect(result).toBe(testPath);\n      });\n\n      it('rejects disallowed paths', async () => {\n        const testPath = process.platform === 'win32' ? 'C:\\\\Windows\\\\System32\\\\file.txt' : '/etc/passwd';\n        await expect(validatePath(testPath))\n          .rejects.toThrow('Access denied - path outside allowed directories');\n      });\n\n      it('handles non-existent files by checking parent directory', async () => {\n        const newFilePath = process.platform === 'win32' ? 'C:\\\\Users\\\\test\\\\newfile.txt' : '/home/user/newfile.txt';\n        const parentPath = process.platform === 'win32' ? 'C:\\\\Users\\\\test' : '/home/user';\n        \n        // Create an error with the ENOENT code that the implementation checks for\n        const enoentError = new Error('ENOENT') as NodeJS.ErrnoException;\n        enoentError.code = 'ENOENT';\n        \n        mockFs.realpath\n          .mockRejectedValueOnce(enoentError)\n          .mockResolvedValueOnce(parentPath);\n        \n        const result = await validatePath(newFilePath);\n        expect(result).toBe(path.resolve(newFilePath));\n      });\n\n      it('rejects when parent directory does not exist', async () => {\n        const newFilePath = process.platform === 'win32' ? 
'C:\\\\Users\\\\test\\\\nonexistent\\\\newfile.txt' : '/home/user/nonexistent/newfile.txt';\n        \n        // Create errors with the ENOENT code\n        const enoentError1 = new Error('ENOENT') as NodeJS.ErrnoException;\n        enoentError1.code = 'ENOENT';\n        const enoentError2 = new Error('ENOENT') as NodeJS.ErrnoException;\n        enoentError2.code = 'ENOENT';\n        \n        mockFs.realpath\n          .mockRejectedValueOnce(enoentError1)\n          .mockRejectedValueOnce(enoentError2);\n        \n        await expect(validatePath(newFilePath))\n          .rejects.toThrow('Parent directory does not exist');\n      });\n\n      it('resolves relative paths against allowed directories instead of process.cwd()', async () => {\n        const relativePath = 'test-file.txt';\n        const originalCwd = process.cwd;\n        \n        // Mock process.cwd to return a directory outside allowed directories\n        const disallowedCwd = process.platform === 'win32' ? 'C:\\\\Windows\\\\System32' : '/root';\n        (process as any).cwd = vi.fn(() => disallowedCwd);\n        \n        try {\n          const result = await validatePath(relativePath);\n          \n          // Result should be resolved against first allowed directory, not process.cwd()\n          const expectedPath = process.platform === 'win32' \n            ? 
path.resolve('C:\\\\Users\\\\test', relativePath)\n            : path.resolve('/home/user', relativePath);\n          \n          expect(result).toBe(expectedPath);\n          expect(result).not.toContain(disallowedCwd);\n        } finally {\n          // Restore original process.cwd\n          process.cwd = originalCwd;\n        }\n      });\n    });\n  });\n\n  describe('File Operations', () => {\n    describe('getFileStats', () => {\n      it('returns file statistics', async () => {\n        const mockStats = {\n          size: 1024,\n          birthtime: new Date('2023-01-01'),\n          mtime: new Date('2023-01-02'),\n          atime: new Date('2023-01-03'),\n          isDirectory: () => false,\n          isFile: () => true,\n          mode: 0o644\n        };\n        \n        mockFs.stat.mockResolvedValueOnce(mockStats as any);\n        \n        const result = await getFileStats('/test/file.txt');\n        \n        expect(result).toEqual({\n          size: 1024,\n          created: new Date('2023-01-01'),\n          modified: new Date('2023-01-02'),\n          accessed: new Date('2023-01-03'),\n          isDirectory: false,\n          isFile: true,\n          permissions: '644'\n        });\n      });\n\n      it('handles directory statistics', async () => {\n        const mockStats = {\n          size: 4096,\n          birthtime: new Date('2023-01-01'),\n          mtime: new Date('2023-01-02'),\n          atime: new Date('2023-01-03'),\n          isDirectory: () => true,\n          isFile: () => false,\n          mode: 0o755\n        };\n        \n        mockFs.stat.mockResolvedValueOnce(mockStats as any);\n        \n        const result = await getFileStats('/test/dir');\n        \n        expect(result.isDirectory).toBe(true);\n        expect(result.isFile).toBe(false);\n        expect(result.permissions).toBe('755');\n      });\n    });\n\n    describe('readFileContent', () => {\n      it('reads file with default encoding', async () => {\n        
mockFs.readFile.mockResolvedValueOnce('file content');\n        \n        const result = await readFileContent('/test/file.txt');\n        \n        expect(result).toBe('file content');\n        expect(mockFs.readFile).toHaveBeenCalledWith('/test/file.txt', 'utf-8');\n      });\n\n      it('reads file with custom encoding', async () => {\n        mockFs.readFile.mockResolvedValueOnce('file content');\n        \n        const result = await readFileContent('/test/file.txt', 'ascii');\n        \n        expect(result).toBe('file content');\n        expect(mockFs.readFile).toHaveBeenCalledWith('/test/file.txt', 'ascii');\n      });\n    });\n\n    describe('writeFileContent', () => {\n      it('writes file content', async () => {\n        mockFs.writeFile.mockResolvedValueOnce(undefined);\n        \n        await writeFileContent('/test/file.txt', 'new content');\n        \n        expect(mockFs.writeFile).toHaveBeenCalledWith('/test/file.txt', 'new content', { encoding: \"utf-8\", flag: 'wx' });\n      });\n    });\n\n  });\n\n  describe('Search & Filtering Functions', () => {\n    describe('searchFilesWithValidation', () => {\n      beforeEach(() => {\n        mockFs.realpath.mockImplementation(async (path: any) => path.toString());\n      });\n\n\n      it('excludes files matching exclude patterns', async () => {\n        const mockEntries = [\n          { name: 'test.txt', isDirectory: () => false },\n          { name: 'test.log', isDirectory: () => false },\n          { name: 'node_modules', isDirectory: () => true }\n        ];\n        \n        mockFs.readdir.mockResolvedValueOnce(mockEntries as any);\n        \n        const testDir = process.platform === 'win32' ? 'C:\\\\allowed\\\\dir' : '/allowed/dir';\n        const allowedDirs = process.platform === 'win32' ? 
['C:\\\\allowed'] : ['/allowed'];\n        \n        // Mock realpath to return the same path for validation to pass\n        mockFs.realpath.mockImplementation(async (inputPath: any) => {\n          const pathStr = inputPath.toString();\n          // Return the path as-is for validation\n          return pathStr;\n        });\n        \n        const result = await searchFilesWithValidation(\n          testDir,\n          '*test*',\n          allowedDirs,\n          { excludePatterns: ['*.log', 'node_modules'] }\n        );\n        \n        const expectedResult = process.platform === 'win32' ? 'C:\\\\allowed\\\\dir\\\\test.txt' : '/allowed/dir/test.txt';\n        expect(result).toEqual([expectedResult]);\n      });\n\n      it('handles validation errors during search', async () => {\n        const mockEntries = [\n          { name: 'test.txt', isDirectory: () => false },\n          { name: 'invalid_file.txt', isDirectory: () => false }\n        ];\n        \n        mockFs.readdir.mockResolvedValueOnce(mockEntries as any);\n        \n        // Mock validatePath to throw error for invalid_file.txt\n        mockFs.realpath.mockImplementation(async (path: any) => {\n          if (path.toString().includes('invalid_file.txt')) {\n            throw new Error('Access denied');\n          }\n          return path.toString();\n        });\n        \n        const testDir = process.platform === 'win32' ? 'C:\\\\allowed\\\\dir' : '/allowed/dir';\n        const allowedDirs = process.platform === 'win32' ? ['C:\\\\allowed'] : ['/allowed'];\n        \n        const result = await searchFilesWithValidation(\n          testDir,\n          '*test*',\n          allowedDirs,\n          {}\n        );\n        \n        // Should only return the valid file, skipping the invalid one\n        const expectedResult = process.platform === 'win32' ? 
'C:\\\\allowed\\\\dir\\\\test.txt' : '/allowed/dir/test.txt';\n        expect(result).toEqual([expectedResult]);\n      });\n\n      it('handles complex exclude patterns with wildcards', async () => {\n        const mockEntries = [\n          { name: 'test.txt', isDirectory: () => false },\n          { name: 'test.backup', isDirectory: () => false },\n          { name: 'important_test.js', isDirectory: () => false }\n        ];\n        \n        mockFs.readdir.mockResolvedValueOnce(mockEntries as any);\n        \n        const testDir = process.platform === 'win32' ? 'C:\\\\allowed\\\\dir' : '/allowed/dir';\n        const allowedDirs = process.platform === 'win32' ? ['C:\\\\allowed'] : ['/allowed'];\n        \n        const result = await searchFilesWithValidation(\n          testDir,\n          '*test*',\n          allowedDirs,\n          { excludePatterns: ['*.backup'] }\n        );\n        \n        const expectedResults = process.platform === 'win32' ? [\n          'C:\\\\allowed\\\\dir\\\\test.txt',\n          'C:\\\\allowed\\\\dir\\\\important_test.js'\n        ] : [\n          '/allowed/dir/test.txt',\n          '/allowed/dir/important_test.js'\n        ];\n        expect(result).toEqual(expectedResults);\n      });\n    });\n  });\n\n  describe('File Editing Functions', () => {\n    describe('applyFileEdits', () => {\n      beforeEach(() => {\n        mockFs.readFile.mockResolvedValue('line1\\nline2\\nline3\\n');\n        mockFs.writeFile.mockResolvedValue(undefined);\n      });\n\n      it('applies simple text replacement', async () => {\n        const edits = [\n          { oldText: 'line2', newText: 'modified line2' }\n        ];\n        \n        mockFs.rename.mockResolvedValueOnce(undefined);\n        \n        const result = await applyFileEdits('/test/file.txt', edits, false);\n        \n        expect(result).toContain('modified line2');\n        // Should write to temporary file then rename\n        
expect(mockFs.writeFile).toHaveBeenCalledWith(\n          expect.stringMatching(/\\/test\\/file\\.txt\\.[a-f0-9]+\\.tmp$/),\n          'line1\\nmodified line2\\nline3\\n',\n          'utf-8'\n        );\n        expect(mockFs.rename).toHaveBeenCalledWith(\n          expect.stringMatching(/\\/test\\/file\\.txt\\.[a-f0-9]+\\.tmp$/),\n          '/test/file.txt'\n        );\n      });\n\n      it('handles dry run mode', async () => {\n        const edits = [\n          { oldText: 'line2', newText: 'modified line2' }\n        ];\n        \n        const result = await applyFileEdits('/test/file.txt', edits, true);\n        \n        expect(result).toContain('modified line2');\n        expect(mockFs.writeFile).not.toHaveBeenCalled();\n      });\n\n      it('applies multiple edits sequentially', async () => {\n        const edits = [\n          { oldText: 'line1', newText: 'first line' },\n          { oldText: 'line3', newText: 'third line' }\n        ];\n        \n        mockFs.rename.mockResolvedValueOnce(undefined);\n        \n        await applyFileEdits('/test/file.txt', edits, false);\n        \n        expect(mockFs.writeFile).toHaveBeenCalledWith(\n          expect.stringMatching(/\\/test\\/file\\.txt\\.[a-f0-9]+\\.tmp$/),\n          'first line\\nline2\\nthird line\\n',\n          'utf-8'\n        );\n        expect(mockFs.rename).toHaveBeenCalledWith(\n          expect.stringMatching(/\\/test\\/file\\.txt\\.[a-f0-9]+\\.tmp$/),\n          '/test/file.txt'\n        );\n      });\n\n      it('handles whitespace-flexible matching', async () => {\n        mockFs.readFile.mockResolvedValue('  line1\\n    line2\\n  line3\\n');\n        \n        const edits = [\n          { oldText: 'line2', newText: 'modified line2' }\n        ];\n        \n        mockFs.rename.mockResolvedValueOnce(undefined);\n        \n        await applyFileEdits('/test/file.txt', edits, false);\n        \n        expect(mockFs.writeFile).toHaveBeenCalledWith(\n          
expect.stringMatching(/\\/test\\/file\\.txt\\.[a-f0-9]+\\.tmp$/),\n          '  line1\\n    modified line2\\n  line3\\n',\n          'utf-8'\n        );\n        expect(mockFs.rename).toHaveBeenCalledWith(\n          expect.stringMatching(/\\/test\\/file\\.txt\\.[a-f0-9]+\\.tmp$/),\n          '/test/file.txt'\n        );\n      });\n\n      it('throws error for non-matching edits', async () => {\n        const edits = [\n          { oldText: 'nonexistent line', newText: 'replacement' }\n        ];\n        \n        await expect(applyFileEdits('/test/file.txt', edits, false))\n          .rejects.toThrow('Could not find exact match for edit');\n      });\n\n      it('handles complex multi-line edits with indentation', async () => {\n        mockFs.readFile.mockResolvedValue('function test() {\\n  console.log(\"hello\");\\n  return true;\\n}');\n        \n        const edits = [\n          { \n            oldText: '  console.log(\"hello\");\\n  return true;', \n            newText: '  console.log(\"world\");\\n  console.log(\"test\");\\n  return false;' \n          }\n        ];\n        \n        mockFs.rename.mockResolvedValueOnce(undefined);\n        \n        await applyFileEdits('/test/file.js', edits, false);\n        \n        expect(mockFs.writeFile).toHaveBeenCalledWith(\n          expect.stringMatching(/\\/test\\/file\\.js\\.[a-f0-9]+\\.tmp$/),\n          'function test() {\\n  console.log(\"world\");\\n  console.log(\"test\");\\n  return false;\\n}',\n          'utf-8'\n        );\n        expect(mockFs.rename).toHaveBeenCalledWith(\n          expect.stringMatching(/\\/test\\/file\\.js\\.[a-f0-9]+\\.tmp$/),\n          '/test/file.js'\n        );\n      });\n\n      it('handles edits with different indentation patterns', async () => {\n        mockFs.readFile.mockResolvedValue('    if (condition) {\\n        doSomething();\\n    }');\n        \n        const edits = [\n          { \n            oldText: 'doSomething();', \n            newText: 
'doSomethingElse();\\n        doAnotherThing();' \n          }\n        ];\n        \n        mockFs.rename.mockResolvedValueOnce(undefined);\n        \n        await applyFileEdits('/test/file.js', edits, false);\n        \n        expect(mockFs.writeFile).toHaveBeenCalledWith(\n          expect.stringMatching(/\\/test\\/file\\.js\\.[a-f0-9]+\\.tmp$/),\n          '    if (condition) {\\n        doSomethingElse();\\n        doAnotherThing();\\n    }',\n          'utf-8'\n        );\n        expect(mockFs.rename).toHaveBeenCalledWith(\n          expect.stringMatching(/\\/test\\/file\\.js\\.[a-f0-9]+\\.tmp$/),\n          '/test/file.js'\n        );\n      });\n\n      it('handles CRLF line endings in file content', async () => {\n        mockFs.readFile.mockResolvedValue('line1\\r\\nline2\\r\\nline3\\r\\n');\n        \n        const edits = [\n          { oldText: 'line2', newText: 'modified line2' }\n        ];\n        \n        mockFs.rename.mockResolvedValueOnce(undefined);\n        \n        await applyFileEdits('/test/file.txt', edits, false);\n        \n        expect(mockFs.writeFile).toHaveBeenCalledWith(\n          expect.stringMatching(/\\/test\\/file\\.txt\\.[a-f0-9]+\\.tmp$/),\n          'line1\\nmodified line2\\nline3\\n',\n          'utf-8'\n        );\n        expect(mockFs.rename).toHaveBeenCalledWith(\n          expect.stringMatching(/\\/test\\/file\\.txt\\.[a-f0-9]+\\.tmp$/),\n          '/test/file.txt'\n        );\n      });\n    });\n\n    describe('tailFile', () => {\n      it('handles empty files', async () => {\n        mockFs.stat.mockResolvedValue({ size: 0 } as any);\n        \n        const result = await tailFile('/test/empty.txt', 5);\n        \n        expect(result).toBe('');\n        expect(mockFs.open).not.toHaveBeenCalled();\n      });\n\n      it('calls stat to check file size', async () => {\n        mockFs.stat.mockResolvedValue({ size: 100 } as any);\n        \n        // Mock file handle with proper typing\n        const 
mockFileHandle = {\n          read: vi.fn(),\n          close: vi.fn()\n        } as any;\n        \n        mockFileHandle.read.mockResolvedValue({ bytesRead: 0 });\n        mockFileHandle.close.mockResolvedValue(undefined);\n        \n        mockFs.open.mockResolvedValue(mockFileHandle);\n        \n        await tailFile('/test/file.txt', 2);\n        \n        expect(mockFs.stat).toHaveBeenCalledWith('/test/file.txt');\n        expect(mockFs.open).toHaveBeenCalledWith('/test/file.txt', 'r');\n      });\n\n      it('handles files with content and returns last lines', async () => {\n        mockFs.stat.mockResolvedValue({ size: 50 } as any);\n        \n        const mockFileHandle = {\n          read: vi.fn(),\n          close: vi.fn()\n        } as any;\n        \n        // Simulate reading file content in chunks\n        mockFileHandle.read\n          .mockResolvedValueOnce({ bytesRead: 20, buffer: Buffer.from('line3\\nline4\\nline5\\n') })\n          .mockResolvedValueOnce({ bytesRead: 0 });\n        mockFileHandle.close.mockResolvedValue(undefined);\n        \n        mockFs.open.mockResolvedValue(mockFileHandle);\n        \n        const result = await tailFile('/test/file.txt', 2);\n        \n        expect(mockFileHandle.close).toHaveBeenCalled();\n      });\n\n      it('handles read errors gracefully', async () => {\n        mockFs.stat.mockResolvedValue({ size: 100 } as any);\n        \n        const mockFileHandle = {\n          read: vi.fn(),\n          close: vi.fn()\n        } as any;\n        \n        mockFileHandle.read.mockResolvedValue({ bytesRead: 0 });\n        mockFileHandle.close.mockResolvedValue(undefined);\n        \n        mockFs.open.mockResolvedValue(mockFileHandle);\n        \n        await tailFile('/test/file.txt', 5);\n        \n        expect(mockFileHandle.close).toHaveBeenCalled();\n      });\n    });\n\n    describe('headFile', () => {\n      it('opens file for reading', async () => {\n        // Mock file handle with proper 
typing\n        const mockFileHandle = {\n          read: vi.fn(),\n          close: vi.fn()\n        } as any;\n        \n        mockFileHandle.read.mockResolvedValue({ bytesRead: 0 });\n        mockFileHandle.close.mockResolvedValue(undefined);\n        \n        mockFs.open.mockResolvedValue(mockFileHandle);\n        \n        await headFile('/test/file.txt', 2);\n        \n        expect(mockFs.open).toHaveBeenCalledWith('/test/file.txt', 'r');\n      });\n\n      it('handles files with content and returns first lines', async () => {\n        const mockFileHandle = {\n          read: vi.fn(),\n          close: vi.fn()\n        } as any;\n        \n        // Simulate reading file content with newlines\n        mockFileHandle.read\n          .mockResolvedValueOnce({ bytesRead: 20, buffer: Buffer.from('line1\\nline2\\nline3\\n') })\n          .mockResolvedValueOnce({ bytesRead: 0 });\n        mockFileHandle.close.mockResolvedValue(undefined);\n        \n        mockFs.open.mockResolvedValue(mockFileHandle);\n        \n        const result = await headFile('/test/file.txt', 2);\n        \n        expect(mockFileHandle.close).toHaveBeenCalled();\n      });\n\n      it('handles files with leftover content', async () => {\n        const mockFileHandle = {\n          read: vi.fn(),\n          close: vi.fn()\n        } as any;\n        \n        // Simulate reading file content without final newline\n        mockFileHandle.read\n          .mockResolvedValueOnce({ bytesRead: 15, buffer: Buffer.from('line1\\nline2\\nend') })\n          .mockResolvedValueOnce({ bytesRead: 0 });\n        mockFileHandle.close.mockResolvedValue(undefined);\n        \n        mockFs.open.mockResolvedValue(mockFileHandle);\n        \n        const result = await headFile('/test/file.txt', 5);\n        \n        expect(mockFileHandle.close).toHaveBeenCalled();\n      });\n\n      it('handles reaching requested line count', async () => {\n        const mockFileHandle = {\n          read: 
vi.fn(),\n          close: vi.fn()\n        } as any;\n        \n        // Simulate reading exactly the requested number of lines\n        mockFileHandle.read\n          .mockResolvedValueOnce({ bytesRead: 12, buffer: Buffer.from('line1\\nline2\\n') })\n          .mockResolvedValueOnce({ bytesRead: 0 });\n        mockFileHandle.close.mockResolvedValue(undefined);\n        \n        mockFs.open.mockResolvedValue(mockFileHandle);\n        \n        const result = await headFile('/test/file.txt', 2);\n        \n        expect(mockFileHandle.close).toHaveBeenCalled();\n      });\n    });\n  });\n});\n"
  },
  {
    "path": "src/filesystem/__tests__/path-utils.test.ts",
    "content": "import { describe, it, expect, afterEach } from 'vitest';\nimport { normalizePath, expandHome, convertToWindowsPath } from '../path-utils.js';\n\ndescribe('Path Utilities', () => {\n  describe('convertToWindowsPath', () => {\n    it('leaves Unix paths unchanged', () => {\n      expect(convertToWindowsPath('/usr/local/bin'))\n        .toBe('/usr/local/bin');\n      expect(convertToWindowsPath('/home/user/some path'))\n        .toBe('/home/user/some path');\n    });\n\n    it('never converts WSL paths (they work correctly in WSL with Node.js fs)', () => {\n      // WSL paths should NEVER be converted, regardless of platform\n      // They are valid Linux paths that work with Node.js fs operations inside WSL\n      expect(convertToWindowsPath('/mnt/c/NS/MyKindleContent'))\n        .toBe('/mnt/c/NS/MyKindleContent');\n      expect(convertToWindowsPath('/mnt/d/Documents'))\n        .toBe('/mnt/d/Documents');\n    });\n\n    it('converts Unix-style Windows paths only on Windows platform', () => {\n      // On Windows, /c/ style paths should be converted\n      if (process.platform === 'win32') {\n        expect(convertToWindowsPath('/c/NS/MyKindleContent'))\n          .toBe('C:\\\\NS\\\\MyKindleContent');\n      } else {\n        // On Linux, leave them unchanged\n        expect(convertToWindowsPath('/c/NS/MyKindleContent'))\n          .toBe('/c/NS/MyKindleContent');\n      }\n    });\n\n    it('leaves Windows paths unchanged but ensures backslashes', () => {\n      expect(convertToWindowsPath('C:\\\\NS\\\\MyKindleContent'))\n        .toBe('C:\\\\NS\\\\MyKindleContent');\n      expect(convertToWindowsPath('C:/NS/MyKindleContent'))\n        .toBe('C:\\\\NS\\\\MyKindleContent');\n    });\n\n    it('handles Windows paths with spaces', () => {\n      expect(convertToWindowsPath('C:\\\\Program Files\\\\Some App'))\n        .toBe('C:\\\\Program Files\\\\Some App');\n      expect(convertToWindowsPath('C:/Program Files/Some App'))\n        .toBe('C:\\\\Program 
Files\\\\Some App');\n    });\n\n    it('handles drive letter paths based on platform', () => {\n      // WSL paths should never be converted\n      expect(convertToWindowsPath('/mnt/d/some/path'))\n        .toBe('/mnt/d/some/path');\n\n      if (process.platform === 'win32') {\n        // On Windows, Unix-style paths like /d/ should be converted\n        expect(convertToWindowsPath('/d/some/path'))\n          .toBe('D:\\\\some\\\\path');\n      } else {\n        // On Linux, /d/ is just a regular Unix path\n        expect(convertToWindowsPath('/d/some/path'))\n          .toBe('/d/some/path');\n      }\n    });\n  });\n\n  describe('normalizePath', () => {\n    it('preserves Unix paths', () => {\n      expect(normalizePath('/usr/local/bin'))\n        .toBe('/usr/local/bin');\n      expect(normalizePath('/home/user/some path'))\n        .toBe('/home/user/some path');\n      expect(normalizePath('\"/usr/local/some app/\"'))\n        .toBe('/usr/local/some app');\n      expect(normalizePath('/usr/local//bin/app///'))\n        .toBe('/usr/local/bin/app');\n      expect(normalizePath('/'))\n        .toBe('/');\n      expect(normalizePath('///'))\n        .toBe('/');\n    });\n\n    it('removes surrounding quotes', () => {\n      expect(normalizePath('\"C:\\\\NS\\\\My Kindle Content\"'))\n        .toBe('C:\\\\NS\\\\My Kindle Content');\n    });\n\n    it('normalizes backslashes', () => {\n      expect(normalizePath('C:\\\\\\\\NS\\\\\\\\MyKindleContent'))\n        .toBe('C:\\\\NS\\\\MyKindleContent');\n    });\n\n    it('converts forward slashes to backslashes on Windows', () => {\n      expect(normalizePath('C:/NS/MyKindleContent'))\n        .toBe('C:\\\\NS\\\\MyKindleContent');\n    });\n\n    it('always preserves WSL paths (they work correctly in WSL)', () => {\n      // WSL paths should ALWAYS be preserved, regardless of platform\n      // This is the fix for issue #2795\n      expect(normalizePath('/mnt/c/NS/MyKindleContent'))\n        
.toBe('/mnt/c/NS/MyKindleContent');\n      expect(normalizePath('/mnt/d/Documents'))\n        .toBe('/mnt/d/Documents');\n    });\n\n    it('handles Unix-style Windows paths', () => {\n      // On Windows, /c/ paths should be converted\n      if (process.platform === 'win32') {\n        expect(normalizePath('/c/NS/MyKindleContent'))\n          .toBe('C:\\\\NS\\\\MyKindleContent');\n      } else if (process.platform === 'linux') {\n        // On Linux, /c/ is just a regular Unix path\n        expect(normalizePath('/c/NS/MyKindleContent'))\n          .toBe('/c/NS/MyKindleContent');\n      }\n    });\n\n    it('handles paths with spaces and mixed slashes', () => {\n      expect(normalizePath('C:/NS/My Kindle Content'))\n        .toBe('C:\\\\NS\\\\My Kindle Content');\n      // WSL paths should always be preserved\n      expect(normalizePath('/mnt/c/NS/My Kindle Content'))\n        .toBe('/mnt/c/NS/My Kindle Content');\n      expect(normalizePath('C:\\\\Program Files (x86)\\\\App Name'))\n        .toBe('C:\\\\Program Files (x86)\\\\App Name');\n      expect(normalizePath('\"C:\\\\Program Files\\\\App Name\"'))\n        .toBe('C:\\\\Program Files\\\\App Name');\n      expect(normalizePath('  C:\\\\Program Files\\\\App Name  '))\n        .toBe('C:\\\\Program Files\\\\App Name');\n    });\n\n    it('preserves spaces in all path formats', () => {\n      // WSL paths should always be preserved\n      expect(normalizePath('/mnt/c/Program Files/App Name'))\n        .toBe('/mnt/c/Program Files/App Name');\n\n      if (process.platform === 'win32') {\n        // On Windows, Unix-style paths like /c/ should be converted\n        expect(normalizePath('/c/Program Files/App Name'))\n          .toBe('C:\\\\Program Files\\\\App Name');\n      } else {\n        // On Linux, /c/ is just a regular Unix path\n        expect(normalizePath('/c/Program Files/App Name'))\n          .toBe('/c/Program Files/App Name');\n      }\n      expect(normalizePath('C:/Program Files/App Name'))\n        
.toBe('C:\\\\Program Files\\\\App Name');\n    });\n\n    it('handles special characters in paths', () => {\n      // Test ampersand in path\n      expect(normalizePath('C:\\\\NS\\\\Sub&Folder'))\n        .toBe('C:\\\\NS\\\\Sub&Folder');\n      expect(normalizePath('C:/NS/Sub&Folder'))\n        .toBe('C:\\\\NS\\\\Sub&Folder');\n      // WSL paths should always be preserved\n      expect(normalizePath('/mnt/c/NS/Sub&Folder'))\n        .toBe('/mnt/c/NS/Sub&Folder');\n\n      // Test tilde in path (short names in Windows)\n      expect(normalizePath('C:\\\\NS\\\\MYKIND~1'))\n        .toBe('C:\\\\NS\\\\MYKIND~1');\n      expect(normalizePath('/Users/NEMANS~1/FOLDER~2/SUBFO~1/Public/P12PST~1'))\n        .toBe('/Users/NEMANS~1/FOLDER~2/SUBFO~1/Public/P12PST~1');\n\n      // Test other special characters\n      expect(normalizePath('C:\\\\Path with #hash'))\n        .toBe('C:\\\\Path with #hash');\n      expect(normalizePath('C:\\\\Path with (parentheses)'))\n        .toBe('C:\\\\Path with (parentheses)');\n      expect(normalizePath('C:\\\\Path with [brackets]'))\n        .toBe('C:\\\\Path with [brackets]');\n      expect(normalizePath('C:\\\\Path with @at+plus$dollar%percent'))\n        .toBe('C:\\\\Path with @at+plus$dollar%percent');\n    });\n\n    it('capitalizes lowercase drive letters for Windows paths', () => {\n      expect(normalizePath('c:/windows/system32'))\n        .toBe('C:\\\\windows\\\\system32');\n      // WSL paths should always be preserved\n      expect(normalizePath('/mnt/d/my/folder'))\n        .toBe('/mnt/d/my/folder');\n\n      if (process.platform === 'win32') {\n        // On Windows, Unix-style paths should be converted and capitalized\n        expect(normalizePath('/e/another/folder'))\n          .toBe('E:\\\\another\\\\folder');\n      } else {\n        // On Linux, /e/ is just a regular Unix path\n        expect(normalizePath('/e/another/folder'))\n          .toBe('/e/another/folder');\n      }\n    });\n\n    it('handles UNC paths 
correctly', () => {\n      // UNC paths should preserve the leading double backslash\n      const uncPath = '\\\\\\\\SERVER\\\\share\\\\folder';\n      expect(normalizePath(uncPath)).toBe('\\\\\\\\SERVER\\\\share\\\\folder');\n      \n      // Test UNC path with double backslashes that need normalization\n      const uncPathWithDoubles = '\\\\\\\\\\\\\\\\SERVER\\\\\\\\share\\\\\\\\folder';\n      expect(normalizePath(uncPathWithDoubles)).toBe('\\\\\\\\SERVER\\\\share\\\\folder');\n    });\n\n    it('returns normalized non-Windows/WSL/Unix-style Windows paths as is after basic normalization', () => {\n      // A path that looks somewhat absolute but isn't a drive or recognized Unix root for Windows conversion\n      // These paths should be preserved as-is (not converted to Windows C:\\ format or WSL format)\n      const otherAbsolutePath = '\\\\someserver\\\\share\\\\file';\n      expect(normalizePath(otherAbsolutePath)).toBe(otherAbsolutePath);\n    });\n  });\n\n  describe('expandHome', () => {\n    it('expands ~ to home directory', () => {\n      const result = expandHome('~/test');\n      expect(result).toContain('test');\n      expect(result).not.toContain('~');\n    });\n\n    it('expands bare ~ to home directory', () => {\n      const result = expandHome('~');\n      expect(result).not.toContain('~');\n      expect(result.length).toBeGreaterThan(0);\n    });\n\n    it('leaves other paths unchanged', () => {\n      expect(expandHome('C:/test')).toBe('C:/test');\n    });\n  });\n\n  describe('WSL path handling (issue #2795 fix)', () => {\n    // Save original platform\n    const originalPlatform = process.platform;\n\n    afterEach(() => {\n      // Restore platform after each test\n      Object.defineProperty(process, 'platform', {\n        value: originalPlatform,\n        writable: true,\n        configurable: true\n      });\n    });\n\n    it('should NEVER convert WSL paths - they work correctly in WSL with Node.js fs', () => {\n      // The key insight: 
When running `wsl npx ...`, Node.js runs INSIDE WSL (process.platform === 'linux')\n      // and /mnt/c/ paths work correctly with Node.js fs operations in that environment.\n      // Converting them to C:\\ format breaks fs operations because Windows paths don't work inside WSL.\n\n      // Mock Linux platform (inside WSL)\n      Object.defineProperty(process, 'platform', {\n        value: 'linux',\n        writable: true,\n        configurable: true\n      });\n\n      // WSL paths should NOT be converted, even inside WSL\n      expect(normalizePath('/mnt/c/Users/username/folder'))\n        .toBe('/mnt/c/Users/username/folder');\n\n      expect(normalizePath('/mnt/d/Documents/project'))\n        .toBe('/mnt/d/Documents/project');\n    });\n\n    it('should also preserve WSL paths when running on Windows', () => {\n      // Mock Windows platform\n      Object.defineProperty(process, 'platform', {\n        value: 'win32',\n        writable: true,\n        configurable: true\n      });\n\n      // WSL paths should still be preserved (though they wouldn't be accessible from Windows Node.js)\n      expect(normalizePath('/mnt/c/Users/username/folder'))\n        .toBe('/mnt/c/Users/username/folder');\n\n      expect(normalizePath('/mnt/d/Documents/project'))\n        .toBe('/mnt/d/Documents/project');\n    });\n\n    it('should convert Unix-style Windows paths (/c/) only when running on Windows (win32)', () => {\n      // Mock process.platform to be 'win32' (Windows)\n      Object.defineProperty(process, 'platform', {\n        value: 'win32',\n        writable: true,\n        configurable: true\n      });\n\n      // Unix-style Windows paths like /c/ should be converted on Windows\n      expect(normalizePath('/c/Users/username/folder'))\n        .toBe('C:\\\\Users\\\\username\\\\folder');\n\n      expect(normalizePath('/d/Documents/project'))\n        .toBe('D:\\\\Documents\\\\project');\n    });\n\n    it('should NOT convert Unix-style paths (/c/) when running inside 
WSL (linux)', () => {\n      // Mock process.platform to be 'linux' (WSL/Linux)\n      Object.defineProperty(process, 'platform', {\n        value: 'linux',\n        writable: true,\n        configurable: true\n      });\n\n      // When on Linux, /c/ is just a regular Unix directory, not a drive letter\n      expect(normalizePath('/c/some/path'))\n        .toBe('/c/some/path');\n\n      expect(normalizePath('/d/another/path'))\n        .toBe('/d/another/path');\n    });\n\n    it('should preserve regular Unix paths on all platforms', () => {\n      // Test on Linux\n      Object.defineProperty(process, 'platform', {\n        value: 'linux',\n        writable: true,\n        configurable: true\n      });\n\n      expect(normalizePath('/home/user/documents'))\n        .toBe('/home/user/documents');\n\n      expect(normalizePath('/var/log/app'))\n        .toBe('/var/log/app');\n\n      // Test on Windows (though these paths wouldn't work on Windows)\n      Object.defineProperty(process, 'platform', {\n        value: 'win32',\n        writable: true,\n        configurable: true\n      });\n\n      expect(normalizePath('/home/user/documents'))\n        .toBe('/home/user/documents');\n\n      expect(normalizePath('/var/log/app'))\n        .toBe('/var/log/app');\n    });\n\n    it('reproduces exact scenario from issue #2795', () => {\n      // Simulate running inside WSL: wsl npx @modelcontextprotocol/server-filesystem /mnt/c/Users/username/folder\n      Object.defineProperty(process, 'platform', {\n        value: 'linux',\n        writable: true,\n        configurable: true\n      });\n\n      // This is the exact path from the issue\n      const inputPath = '/mnt/c/Users/username/folder';\n      const result = normalizePath(inputPath);\n\n      // Should NOT convert to C:\\Users\\username\\folder\n      expect(result).toBe('/mnt/c/Users/username/folder');\n      expect(result).not.toContain('C:');\n      expect(result).not.toContain('\\\\');\n    });\n\n    
it('normalizes bare Windows drive letters to the drive root on Windows', () => {\n      Object.defineProperty(process, 'platform', {\n        value: 'win32',\n        writable: true,\n        configurable: true\n      });\n\n      expect(normalizePath('C:')).toBe('C:\\\\');\n      expect(normalizePath('d:')).toBe('D:\\\\');\n    });\n\n    it('should handle relative path slash conversion based on platform', () => {\n      // This test verifies platform-specific behavior naturally without mocking\n      // On Windows: forward slashes converted to backslashes\n      // On Linux/Unix: forward slashes preserved\n      const relativePath = 'some/relative/path';\n      const result = normalizePath(relativePath);\n\n      if (originalPlatform === 'win32') {\n        expect(result).toBe('some\\\\relative\\\\path');\n      } else {\n        expect(result).toBe('some/relative/path');\n      }\n    });\n  });\n});\n"
  },
  {
    "path": "src/filesystem/__tests__/path-validation.test.ts",
    "content": "import { describe, it, expect, beforeEach, afterEach } from 'vitest';\nimport * as path from 'path';\nimport * as fs from 'fs/promises';\nimport * as os from 'os';\nimport { isPathWithinAllowedDirectories } from '../path-validation.js';\n\n/**\n * Check if the current environment supports symlink creation\n */\nasync function checkSymlinkSupport(): Promise<boolean> {\n  const testDir = await fs.mkdtemp(path.join(os.tmpdir(), 'symlink-test-'));\n  try {\n    const targetFile = path.join(testDir, 'target.txt');\n    const linkFile = path.join(testDir, 'link.txt');\n    \n    await fs.writeFile(targetFile, 'test');\n    await fs.symlink(targetFile, linkFile);\n    \n    // If we get here, symlinks are supported\n    return true;\n  } catch (error) {\n    // EPERM indicates no symlink permissions\n    if ((error as NodeJS.ErrnoException).code === 'EPERM') {\n      return false;\n    }\n    // Other errors might indicate a real problem\n    throw error;\n  } finally {\n    await fs.rm(testDir, { recursive: true, force: true });\n  }\n}\n\n// Global variable to store symlink support status\nlet symlinkSupported: boolean | null = null;\n\n/**\n * Get cached symlink support status, checking once per test run\n */\nasync function getSymlinkSupport(): Promise<boolean> {\n  if (symlinkSupported === null) {\n    symlinkSupported = await checkSymlinkSupport();\n    if (!symlinkSupported) {\n      console.log('\\n⚠️  Symlink tests will be skipped - symlink creation not supported in this environment');\n      console.log('   On Windows, enable Developer Mode or run as Administrator to enable symlink tests');\n    }\n  }\n  return symlinkSupported;\n}\n\ndescribe('Path Validation', () => {\n  it('allows exact directory match', () => {\n    const allowed = ['/home/user/project'];\n    expect(isPathWithinAllowedDirectories('/home/user/project', allowed)).toBe(true);\n  });\n\n  it('allows subdirectories', () => {\n    const allowed = ['/home/user/project'];\n    
expect(isPathWithinAllowedDirectories('/home/user/project/src', allowed)).toBe(true);\n    expect(isPathWithinAllowedDirectories('/home/user/project/src/index.js', allowed)).toBe(true);\n    expect(isPathWithinAllowedDirectories('/home/user/project/deeply/nested/file.txt', allowed)).toBe(true);\n  });\n\n  it('blocks similar directory names (prefix vulnerability)', () => {\n    const allowed = ['/home/user/project'];\n    expect(isPathWithinAllowedDirectories('/home/user/project2', allowed)).toBe(false);\n    expect(isPathWithinAllowedDirectories('/home/user/project_backup', allowed)).toBe(false);\n    expect(isPathWithinAllowedDirectories('/home/user/project-old', allowed)).toBe(false);\n    expect(isPathWithinAllowedDirectories('/home/user/projectile', allowed)).toBe(false);\n    expect(isPathWithinAllowedDirectories('/home/user/project.bak', allowed)).toBe(false);\n  });\n\n  it('blocks paths outside allowed directories', () => {\n    const allowed = ['/home/user/project'];\n    expect(isPathWithinAllowedDirectories('/home/user/other', allowed)).toBe(false);\n    expect(isPathWithinAllowedDirectories('/etc/passwd', allowed)).toBe(false);\n    expect(isPathWithinAllowedDirectories('/home/user', allowed)).toBe(false);\n    expect(isPathWithinAllowedDirectories('/', allowed)).toBe(false);\n  });\n\n  it('handles multiple allowed directories', () => {\n    const allowed = ['/home/user/project1', '/home/user/project2'];\n    expect(isPathWithinAllowedDirectories('/home/user/project1/src', allowed)).toBe(true);\n    expect(isPathWithinAllowedDirectories('/home/user/project2/src', allowed)).toBe(true);\n    expect(isPathWithinAllowedDirectories('/home/user/project3', allowed)).toBe(false);\n    expect(isPathWithinAllowedDirectories('/home/user/project1_backup', allowed)).toBe(false);\n    expect(isPathWithinAllowedDirectories('/home/user/project2-old', allowed)).toBe(false);\n  });\n\n  it('blocks parent and sibling directories', () => {\n    const allowed = 
['/test/allowed'];\n\n    // Parent directory\n    expect(isPathWithinAllowedDirectories('/test', allowed)).toBe(false);\n    expect(isPathWithinAllowedDirectories('/', allowed)).toBe(false);\n\n    // Sibling with common prefix\n    expect(isPathWithinAllowedDirectories('/test/allowed_sibling', allowed)).toBe(false);\n    expect(isPathWithinAllowedDirectories('/test/allowed2', allowed)).toBe(false);\n  });\n\n  it('handles paths with special characters', () => {\n    const allowed = ['/home/user/my-project (v2)'];\n\n    expect(isPathWithinAllowedDirectories('/home/user/my-project (v2)', allowed)).toBe(true);\n    expect(isPathWithinAllowedDirectories('/home/user/my-project (v2)/src', allowed)).toBe(true);\n    expect(isPathWithinAllowedDirectories('/home/user/my-project (v2)_backup', allowed)).toBe(false);\n    expect(isPathWithinAllowedDirectories('/home/user/my-project', allowed)).toBe(false);\n  });\n\n  describe('Input validation', () => {\n    it('rejects empty inputs', () => {\n      const allowed = ['/home/user/project'];\n\n      expect(isPathWithinAllowedDirectories('', allowed)).toBe(false);\n      expect(isPathWithinAllowedDirectories('/home/user/project', [])).toBe(false);\n    });\n\n    it('handles trailing separators correctly', () => {\n      const allowed = ['/home/user/project'];\n\n      // Path with trailing separator should still match\n      expect(isPathWithinAllowedDirectories('/home/user/project/', allowed)).toBe(true);\n\n      // Allowed directory with trailing separator\n      const allowedWithSep = ['/home/user/project/'];\n      expect(isPathWithinAllowedDirectories('/home/user/project', allowedWithSep)).toBe(true);\n      expect(isPathWithinAllowedDirectories('/home/user/project/', allowedWithSep)).toBe(true);\n\n      // Should still block similar names with or without trailing separators\n      expect(isPathWithinAllowedDirectories('/home/user/project2', allowedWithSep)).toBe(false);\n      
expect(isPathWithinAllowedDirectories('/home/user/project2', allowed)).toBe(false);\n      expect(isPathWithinAllowedDirectories('/home/user/project2/', allowed)).toBe(false);\n    });\n\n    it('skips empty directory entries in allowed list', () => {\n      const allowed = ['', '/home/user/project', ''];\n      expect(isPathWithinAllowedDirectories('/home/user/project', allowed)).toBe(true);\n      expect(isPathWithinAllowedDirectories('/home/user/project/src', allowed)).toBe(true);\n\n      // Should still validate properly with empty entries\n      expect(isPathWithinAllowedDirectories('/home/user/other', allowed)).toBe(false);\n    });\n\n    it('handles Windows paths with trailing separators', () => {\n      if (path.sep === '\\\\') {\n        const allowed = ['C:\\\\Users\\\\project'];\n\n        // Path with trailing separator\n        expect(isPathWithinAllowedDirectories('C:\\\\Users\\\\project\\\\', allowed)).toBe(true);\n\n        // Allowed with trailing separator\n        const allowedWithSep = ['C:\\\\Users\\\\project\\\\'];\n        expect(isPathWithinAllowedDirectories('C:\\\\Users\\\\project', allowedWithSep)).toBe(true);\n        expect(isPathWithinAllowedDirectories('C:\\\\Users\\\\project\\\\', allowedWithSep)).toBe(true);\n\n        // Should still block similar names\n        expect(isPathWithinAllowedDirectories('C:\\\\Users\\\\project2\\\\', allowed)).toBe(false);\n      }\n    });\n  });\n\n  describe('Error handling', () => {\n    it('normalizes relative paths to absolute', () => {\n      const allowed = [process.cwd()];\n\n      // Relative paths get normalized to absolute paths based on cwd\n      expect(isPathWithinAllowedDirectories('relative/path', allowed)).toBe(true);\n      expect(isPathWithinAllowedDirectories('./file', allowed)).toBe(true);\n\n      // Parent directory references that escape allowed directory\n      const parentAllowed = ['/home/user/project'];\n      expect(isPathWithinAllowedDirectories('../parent', 
parentAllowed)).toBe(false);\n    });\n\n    it('returns false for relative paths in allowed directories', () => {\n      const badAllowed = ['relative/path', '/some/other/absolute/path'];\n\n      // Relative paths in allowed dirs are normalized to absolute based on cwd\n      // The normalized 'relative/path' won't match our test path\n      expect(isPathWithinAllowedDirectories('/some/other/absolute/path/file', badAllowed)).toBe(true);\n      expect(isPathWithinAllowedDirectories('/absolute/path/file', badAllowed)).toBe(false);\n    });\n\n    it('handles null and undefined inputs gracefully', () => {\n      const allowed = ['/home/user/project'];\n\n      // Should return false, not crash\n      expect(isPathWithinAllowedDirectories(null as any, allowed)).toBe(false);\n      expect(isPathWithinAllowedDirectories(undefined as any, allowed)).toBe(false);\n      expect(isPathWithinAllowedDirectories('/path', null as any)).toBe(false);\n      expect(isPathWithinAllowedDirectories('/path', undefined as any)).toBe(false);\n    });\n  });\n\n  describe('Unicode and special characters', () => {\n    it('handles unicode characters in paths', () => {\n      const allowed = ['/home/user/café'];\n\n      expect(isPathWithinAllowedDirectories('/home/user/café', allowed)).toBe(true);\n      expect(isPathWithinAllowedDirectories('/home/user/café/file', allowed)).toBe(true);\n\n      // Different unicode representation won't match (not normalized)\n      const decomposed = '/home/user/cafe\\u0301'; // e + combining accent\n      expect(isPathWithinAllowedDirectories(decomposed, allowed)).toBe(false);\n    });\n\n    it('handles paths with spaces correctly', () => {\n      const allowed = ['/home/user/my project'];\n\n      expect(isPathWithinAllowedDirectories('/home/user/my project', allowed)).toBe(true);\n      expect(isPathWithinAllowedDirectories('/home/user/my project/file', allowed)).toBe(true);\n\n      // Partial matches should fail\n      
expect(isPathWithinAllowedDirectories('/home/user/my', allowed)).toBe(false);\n      expect(isPathWithinAllowedDirectories('/home/user/my proj', allowed)).toBe(false);\n    });\n  });\n\n  describe('Overlapping allowed directories', () => {\n    it('handles nested allowed directories correctly', () => {\n      const allowed = ['/home', '/home/user', '/home/user/project'];\n\n      // All paths under /home are allowed\n      expect(isPathWithinAllowedDirectories('/home/anything', allowed)).toBe(true);\n      expect(isPathWithinAllowedDirectories('/home/user/anything', allowed)).toBe(true);\n      expect(isPathWithinAllowedDirectories('/home/user/project/anything', allowed)).toBe(true);\n\n      // First match wins (most permissive)\n      expect(isPathWithinAllowedDirectories('/home/other/deep/path', allowed)).toBe(true);\n    });\n\n    it('handles root directory as allowed', () => {\n      const allowed = ['/'];\n\n      // Everything is allowed under root (dangerous configuration)\n      expect(isPathWithinAllowedDirectories('/', allowed)).toBe(true);\n      expect(isPathWithinAllowedDirectories('/any/path', allowed)).toBe(true);\n      expect(isPathWithinAllowedDirectories('/etc/passwd', allowed)).toBe(true);\n      expect(isPathWithinAllowedDirectories('/home/user/secret', allowed)).toBe(true);\n\n      // But only on the same filesystem root\n      if (path.sep === '\\\\') {\n        expect(isPathWithinAllowedDirectories('D:\\\\other', ['/'])).toBe(false);\n      }\n    });\n  });\n\n  describe('Cross-platform behavior', () => {\n    it('handles Windows-style paths on Windows', () => {\n      if (path.sep === '\\\\') {\n        const allowed = ['C:\\\\Users\\\\project'];\n        expect(isPathWithinAllowedDirectories('C:\\\\Users\\\\project', allowed)).toBe(true);\n        expect(isPathWithinAllowedDirectories('C:\\\\Users\\\\project\\\\src', allowed)).toBe(true);\n        expect(isPathWithinAllowedDirectories('C:\\\\Users\\\\project2', 
allowed)).toBe(false);\n        expect(isPathWithinAllowedDirectories('C:\\\\Users\\\\project_backup', allowed)).toBe(false);\n      }\n    });\n\n    it('handles Unix-style paths on Unix', () => {\n      if (path.sep === '/') {\n        const allowed = ['/home/user/project'];\n        expect(isPathWithinAllowedDirectories('/home/user/project', allowed)).toBe(true);\n        expect(isPathWithinAllowedDirectories('/home/user/project/src', allowed)).toBe(true);\n        expect(isPathWithinAllowedDirectories('/home/user/project2', allowed)).toBe(false);\n      }\n    });\n  });\n\n  describe('Validation Tests - Path Traversal', () => {\n    it('blocks path traversal attempts', () => {\n      const allowed = ['/home/user/project'];\n\n      // Basic traversal attempts\n      expect(isPathWithinAllowedDirectories('/home/user/project/../../../etc/passwd', allowed)).toBe(false);\n      expect(isPathWithinAllowedDirectories('/home/user/project/../../other', allowed)).toBe(false);\n      expect(isPathWithinAllowedDirectories('/home/user/project/../project2', allowed)).toBe(false);\n\n      // Mixed traversal with valid segments\n      expect(isPathWithinAllowedDirectories('/home/user/project/src/../../project2', allowed)).toBe(false);\n      expect(isPathWithinAllowedDirectories('/home/user/project/./../../other', allowed)).toBe(false);\n\n      // Multiple traversal sequences\n      expect(isPathWithinAllowedDirectories('/home/user/project/../project/../../../etc', allowed)).toBe(false);\n    });\n\n    it('blocks traversal in allowed directories', () => {\n      const allowed = ['/home/user/project/../safe'];\n\n      // The allowed directory itself should be normalized and safe\n      expect(isPathWithinAllowedDirectories('/home/user/safe/file', allowed)).toBe(true);\n      expect(isPathWithinAllowedDirectories('/home/user/project/file', allowed)).toBe(false);\n    });\n\n    it('handles complex traversal patterns', () => {\n      const allowed = 
['/home/user/project'];\n\n      // Double dots in filenames (not traversal) - these normalize to paths within allowed dir\n      expect(isPathWithinAllowedDirectories('/home/user/project/..test', allowed)).toBe(true); // Not traversal\n      expect(isPathWithinAllowedDirectories('/home/user/project/test..', allowed)).toBe(true); // Not traversal\n      expect(isPathWithinAllowedDirectories('/home/user/project/te..st', allowed)).toBe(true); // Not traversal\n\n      // Actual traversal\n      expect(isPathWithinAllowedDirectories('/home/user/project/../test', allowed)).toBe(false); // Is traversal - goes to /home/user/test\n\n      // Edge case: /home/user/project/.. normalizes to /home/user (parent dir)\n      expect(isPathWithinAllowedDirectories('/home/user/project/..', allowed)).toBe(false); // Goes to parent\n    });\n  });\n\n  describe('Validation Tests - Null Bytes', () => {\n    it('rejects paths with null bytes', () => {\n      const allowed = ['/home/user/project'];\n\n      expect(isPathWithinAllowedDirectories('/home/user/project\\x00/etc/passwd', allowed)).toBe(false);\n      expect(isPathWithinAllowedDirectories('/home/user/project/test\\x00.txt', allowed)).toBe(false);\n      expect(isPathWithinAllowedDirectories('\\x00/home/user/project', allowed)).toBe(false);\n      expect(isPathWithinAllowedDirectories('/home/user/project/\\x00', allowed)).toBe(false);\n    });\n\n    it('rejects allowed directories with null bytes', () => {\n      const allowed = ['/home/user/project\\x00'];\n\n      expect(isPathWithinAllowedDirectories('/home/user/project', allowed)).toBe(false);\n      expect(isPathWithinAllowedDirectories('/home/user/project/file', allowed)).toBe(false);\n    });\n  });\n\n  describe('Validation Tests - Special Characters', () => {\n    it('allows percent signs in filenames', () => {\n      const allowed = ['/home/user/project'];\n\n      // Percent is a valid filename character\n      
expect(isPathWithinAllowedDirectories('/home/user/project/report_50%.pdf', allowed)).toBe(true);\n      expect(isPathWithinAllowedDirectories('/home/user/project/Q1_25%_growth', allowed)).toBe(true);\n      expect(isPathWithinAllowedDirectories('/home/user/project/%41', allowed)).toBe(true); // File named %41\n\n      // URL encoding is NOT decoded by path.normalize, so these are just odd filenames\n      expect(isPathWithinAllowedDirectories('/home/user/project/%2e%2e', allowed)).toBe(true); // File named \"%2e%2e\"\n      expect(isPathWithinAllowedDirectories('/home/user/project/file%20name', allowed)).toBe(true); // File with %20 in name\n    });\n\n    it('handles percent signs in allowed directories', () => {\n      const allowed = ['/home/user/project%20files'];\n\n      // This is a directory literally named \"project%20files\"\n      expect(isPathWithinAllowedDirectories('/home/user/project%20files/test', allowed)).toBe(true);\n      expect(isPathWithinAllowedDirectories('/home/user/project files/test', allowed)).toBe(false); // Different dir\n    });\n  });\n\n  describe('Path Normalization', () => {\n    it('normalizes paths before comparison', () => {\n      const allowed = ['/home/user/project'];\n\n      // Trailing slashes\n      expect(isPathWithinAllowedDirectories('/home/user/project/', allowed)).toBe(true);\n      expect(isPathWithinAllowedDirectories('/home/user/project//', allowed)).toBe(true);\n      expect(isPathWithinAllowedDirectories('/home/user/project///', allowed)).toBe(true);\n\n      // Current directory references\n      expect(isPathWithinAllowedDirectories('/home/user/project/./src', allowed)).toBe(true);\n      expect(isPathWithinAllowedDirectories('/home/user/./project/src', allowed)).toBe(true);\n\n      // Multiple slashes\n      expect(isPathWithinAllowedDirectories('/home/user/project//src//file', allowed)).toBe(true);\n      expect(isPathWithinAllowedDirectories('/home//user//project//src', allowed)).toBe(true);\n\n      // 
Should still block outside paths\n      expect(isPathWithinAllowedDirectories('/home/user//project2', allowed)).toBe(false);\n    });\n\n    it('handles mixed separators correctly', () => {\n      if (path.sep === '\\\\') {\n        const allowed = ['C:\\\\Users\\\\project'];\n\n        // Mixed separators should be normalized\n        expect(isPathWithinAllowedDirectories('C:/Users/project', allowed)).toBe(true);\n        expect(isPathWithinAllowedDirectories('C:\\\\Users/project\\\\src', allowed)).toBe(true);\n        expect(isPathWithinAllowedDirectories('C:/Users\\\\project/src', allowed)).toBe(true);\n      }\n    });\n  });\n\n  describe('Edge Cases', () => {\n    it('rejects non-string inputs safely', () => {\n      const allowed = ['/home/user/project'];\n\n      expect(isPathWithinAllowedDirectories(123 as any, allowed)).toBe(false);\n      expect(isPathWithinAllowedDirectories({} as any, allowed)).toBe(false);\n      expect(isPathWithinAllowedDirectories([] as any, allowed)).toBe(false);\n      expect(isPathWithinAllowedDirectories(null as any, allowed)).toBe(false);\n      expect(isPathWithinAllowedDirectories(undefined as any, allowed)).toBe(false);\n\n      // Non-string in allowed directories\n      expect(isPathWithinAllowedDirectories('/home/user/project', [123 as any])).toBe(false);\n      expect(isPathWithinAllowedDirectories('/home/user/project', [{} as any])).toBe(false);\n    });\n\n    it('handles very long paths', () => {\n      const allowed = ['/home/user/project'];\n\n      // Create a very long path that's still valid\n      const longSubPath = 'a/'.repeat(1000) + 'file.txt';\n      expect(isPathWithinAllowedDirectories(`/home/user/project/${longSubPath}`, allowed)).toBe(true);\n\n      // Very long path that escapes\n      const escapePath = 'a/'.repeat(1000) + '../'.repeat(1001) + 'etc/passwd';\n      expect(isPathWithinAllowedDirectories(`/home/user/project/${escapePath}`, allowed)).toBe(false);\n    });\n  });\n\n  
describe('Additional Coverage', () => {\n    it('handles allowed directories with traversal that normalizes safely', () => {\n      // These allowed dirs contain traversal but normalize to valid paths\n      const allowed = ['/home/user/../user/project'];\n\n      // Should normalize to /home/user/project and work correctly\n      expect(isPathWithinAllowedDirectories('/home/user/project/file', allowed)).toBe(true);\n      expect(isPathWithinAllowedDirectories('/home/user/other', allowed)).toBe(false);\n    });\n\n    it('handles symbolic dots in filenames', () => {\n      const allowed = ['/home/user/project'];\n\n      // Single and double dots as actual filenames (not traversal)\n      expect(isPathWithinAllowedDirectories('/home/user/project/.', allowed)).toBe(true);\n      expect(isPathWithinAllowedDirectories('/home/user/project/..', allowed)).toBe(false); // This normalizes to parent\n      expect(isPathWithinAllowedDirectories('/home/user/project/...', allowed)).toBe(true); // Three dots is a valid filename\n      expect(isPathWithinAllowedDirectories('/home/user/project/....', allowed)).toBe(true); // Four dots is a valid filename\n    });\n\n    it('handles UNC paths on Windows', () => {\n      if (path.sep === '\\\\') {\n        const allowed = ['\\\\\\\\server\\\\share\\\\project'];\n\n        expect(isPathWithinAllowedDirectories('\\\\\\\\server\\\\share\\\\project', allowed)).toBe(true);\n        expect(isPathWithinAllowedDirectories('\\\\\\\\server\\\\share\\\\project\\\\file', allowed)).toBe(true);\n        expect(isPathWithinAllowedDirectories('\\\\\\\\server\\\\share\\\\other', allowed)).toBe(false);\n        expect(isPathWithinAllowedDirectories('\\\\\\\\other\\\\share\\\\project', allowed)).toBe(false);\n      }\n    });\n  });\n\n  describe('Symlink Tests', () => {\n    let testDir: string;\n    let allowedDir: string;\n    let forbiddenDir: string;\n\n    beforeEach(async () => {\n      testDir = await fs.mkdtemp(path.join(os.tmpdir(), 
'fs-error-test-'));\n      allowedDir = path.join(testDir, 'allowed');\n      forbiddenDir = path.join(testDir, 'forbidden');\n\n      await fs.mkdir(allowedDir, { recursive: true });\n      await fs.mkdir(forbiddenDir, { recursive: true });\n    });\n\n    afterEach(async () => {\n      await fs.rm(testDir, { recursive: true, force: true });\n    });\n\n    it('validates symlink handling', async () => {\n      // Test with symlinks\n      try {\n        const linkPath = path.join(allowedDir, 'bad-link');\n        const targetPath = path.join(forbiddenDir, 'target.txt');\n\n        await fs.writeFile(targetPath, 'content');\n        await fs.symlink(targetPath, linkPath);\n\n        // In real implementation, this would throw with the resolved path\n        const realPath = await fs.realpath(linkPath);\n        const allowed = [allowedDir];\n\n        // Symlink target should be outside allowed directory\n        expect(isPathWithinAllowedDirectories(realPath, allowed)).toBe(false);\n      } catch (error) {\n        // Skip if no symlink permissions\n      }\n    });\n\n    it('handles non-existent paths correctly', async () => {\n      const newFilePath = path.join(allowedDir, 'subdir', 'newfile.txt');\n\n      // Parent directory doesn't exist\n      try {\n        await fs.access(newFilePath);\n      } catch (error) {\n        expect((error as NodeJS.ErrnoException).code).toBe('ENOENT');\n      }\n\n      // After creating parent, validation should work\n      await fs.mkdir(path.dirname(newFilePath), { recursive: true });\n      const allowed = [allowedDir];\n      expect(isPathWithinAllowedDirectories(newFilePath, allowed)).toBe(true);\n    });\n\n    // Test path resolution consistency for symlinked files\n    it('validates symlinked files consistently between path and resolved forms', async () => {\n      try {\n        // Setup: Create target file in forbidden area\n        const targetFile = path.join(forbiddenDir, 'target.txt');\n        await 
fs.writeFile(targetFile, 'TARGET_CONTENT');\n\n        // Create symlink inside allowed directory pointing to forbidden file\n        const symlinkPath = path.join(allowedDir, 'link-to-target.txt');\n        await fs.symlink(targetFile, symlinkPath);\n\n        // The symlink path itself passes validation (looks like it's in allowed dir)\n        expect(isPathWithinAllowedDirectories(symlinkPath, [allowedDir])).toBe(true);\n\n        // But the resolved path should fail validation\n        const resolvedPath = await fs.realpath(symlinkPath);\n        expect(isPathWithinAllowedDirectories(resolvedPath, [allowedDir])).toBe(false);\n\n        // Verify the resolved path goes to the forbidden location (normalize both paths for macOS temp dirs)\n        expect(await fs.realpath(resolvedPath)).toBe(await fs.realpath(targetFile));\n      } catch (error) {\n        // Skip if no symlink permissions on the system\n        if ((error as NodeJS.ErrnoException).code !== 'EPERM') {\n          throw error;\n        }\n      }\n    });\n\n    // Test allowed directory resolution behavior\n    it('validates paths correctly when allowed directory is resolved from symlink', async () => {\n      try {\n        // Setup: Create the actual target directory with content\n        const actualTargetDir = path.join(testDir, 'actual-target');\n        await fs.mkdir(actualTargetDir, { recursive: true });\n        const targetFile = path.join(actualTargetDir, 'file.txt');\n        await fs.writeFile(targetFile, 'FILE_CONTENT');\n\n        // Setup: Create symlink directory that points to target\n        const symlinkDir = path.join(testDir, 'symlink-dir');\n        await fs.symlink(actualTargetDir, symlinkDir);\n\n        // Simulate resolved allowed directory (what the server startup should do)\n        const resolvedAllowedDir = await fs.realpath(symlinkDir);\n        const resolvedTargetDir = await fs.realpath(actualTargetDir);\n        
expect(resolvedAllowedDir).toBe(resolvedTargetDir);\n\n        // Test 1: File access through original symlink path should pass validation with resolved allowed dir\n        const fileViaSymlink = path.join(symlinkDir, 'file.txt');\n        const resolvedFile = await fs.realpath(fileViaSymlink);\n        expect(isPathWithinAllowedDirectories(resolvedFile, [resolvedAllowedDir])).toBe(true);\n\n        // Test 2: File access through resolved path should also pass validation\n        const fileViaResolved = path.join(resolvedTargetDir, 'file.txt');\n        expect(isPathWithinAllowedDirectories(fileViaResolved, [resolvedAllowedDir])).toBe(true);\n\n        // Test 3: Demonstrate inconsistent behavior with unresolved allowed directories\n        // If allowed dirs were not resolved (storing symlink paths instead):\n        const unresolvedAllowedDirs = [symlinkDir];\n        // This validation would incorrectly fail for the same content:\n        expect(isPathWithinAllowedDirectories(resolvedFile, unresolvedAllowedDirs)).toBe(false);\n\n      } catch (error) {\n        // Skip if no symlink permissions on the system\n        if ((error as NodeJS.ErrnoException).code !== 'EPERM') {\n          throw error;\n        }\n      }\n    });\n\n    // Test for macOS /tmp -> /private/tmp symlink issue (GitHub issue #3253)\n    // When allowed directories include BOTH original and resolved paths,\n    // paths through either form should be accepted\n    it('allows paths through both original and resolved symlink directories', async () => {\n      try {\n        // Setup: Create the actual target directory with content\n        const actualTargetDir = path.join(testDir, 'actual-target');\n        await fs.mkdir(actualTargetDir, { recursive: true });\n        const targetFile = path.join(actualTargetDir, 'file.txt');\n        await fs.writeFile(targetFile, 'FILE_CONTENT');\n\n        // Setup: Create symlink directory that points to target (simulates /tmp -> /private/tmp)\n        
const symlinkDir = path.join(testDir, 'symlink-dir');\n        await fs.symlink(actualTargetDir, symlinkDir);\n\n        // Get the resolved path\n        const resolvedDir = await fs.realpath(symlinkDir);\n\n        // THE FIX: Store BOTH original symlink path AND resolved path in allowed directories\n        // This is what the server should do during startup to fix issue #3253\n        const allowedDirsWithBoth = [symlinkDir, resolvedDir];\n\n        // Test 1: Path through original symlink should pass validation\n        // (e.g., user requests /tmp/file.txt when /tmp is in allowed dirs)\n        const fileViaSymlink = path.join(symlinkDir, 'file.txt');\n        expect(isPathWithinAllowedDirectories(fileViaSymlink, allowedDirsWithBoth)).toBe(true);\n\n        // Test 2: Path through resolved directory should also pass validation\n        // (e.g., user requests /private/tmp/file.txt)\n        const fileViaResolved = path.join(resolvedDir, 'file.txt');\n        expect(isPathWithinAllowedDirectories(fileViaResolved, allowedDirsWithBoth)).toBe(true);\n\n        // Test 3: The resolved path of the symlink file should also pass\n        const resolvedFile = await fs.realpath(fileViaSymlink);\n        expect(isPathWithinAllowedDirectories(resolvedFile, allowedDirsWithBoth)).toBe(true);\n\n        // Verify both paths point to the same actual file\n        expect(resolvedFile).toBe(await fs.realpath(fileViaResolved));\n\n      } catch (error) {\n        // Skip if no symlink permissions on the system\n        if ((error as NodeJS.ErrnoException).code !== 'EPERM') {\n          throw error;\n        }\n      }\n    });\n\n    it('resolves nested symlink chains completely', async () => {\n      try {\n        // Setup: Create target file in forbidden area\n        const actualTarget = path.join(forbiddenDir, 'target-file.txt');\n        await fs.writeFile(actualTarget, 'FINAL_CONTENT');\n\n        // Create chain of symlinks: allowedFile -> link2 -> link1 -> 
actualTarget\n        const link1 = path.join(testDir, 'intermediate-link1');\n        const link2 = path.join(testDir, 'intermediate-link2');\n        const allowedFile = path.join(allowedDir, 'seemingly-safe-file');\n\n        await fs.symlink(actualTarget, link1);\n        await fs.symlink(link1, link2);\n        await fs.symlink(link2, allowedFile);\n\n        // The allowed file path passes basic validation\n        expect(isPathWithinAllowedDirectories(allowedFile, [allowedDir])).toBe(true);\n\n        // But complete resolution reveals the forbidden target\n        const fullyResolvedPath = await fs.realpath(allowedFile);\n        expect(isPathWithinAllowedDirectories(fullyResolvedPath, [allowedDir])).toBe(false);\n        expect(await fs.realpath(fullyResolvedPath)).toBe(await fs.realpath(actualTarget));\n\n      } catch (error) {\n        // Skip if no symlink permissions on the system\n        if ((error as NodeJS.ErrnoException).code !== 'EPERM') {\n          throw error;\n        }\n      }\n    });\n  });\n\n  describe('Path Validation Race Condition Tests', () => {\n    let testDir: string;\n    let allowedDir: string;\n    let forbiddenDir: string;\n    let targetFile: string;\n    let testPath: string;\n\n    beforeEach(async () => {\n      testDir = await fs.mkdtemp(path.join(os.tmpdir(), 'race-test-'));\n      allowedDir = path.join(testDir, 'allowed');\n      forbiddenDir = path.join(testDir, 'outside');\n      targetFile = path.join(forbiddenDir, 'target.txt');\n      testPath = path.join(allowedDir, 'test.txt');\n\n      await fs.mkdir(allowedDir, { recursive: true });\n      await fs.mkdir(forbiddenDir, { recursive: true });\n      await fs.writeFile(targetFile, 'ORIGINAL CONTENT', 'utf-8');\n    });\n\n    afterEach(async () => {\n      await fs.rm(testDir, { recursive: true, force: true });\n    });\n\n    it('validates non-existent file paths based on parent directory', async () => {\n      const allowed = [allowedDir];\n\n      
expect(isPathWithinAllowedDirectories(testPath, allowed)).toBe(true);\n      await expect(fs.access(testPath)).rejects.toThrow();\n\n      const parentDir = path.dirname(testPath);\n      expect(isPathWithinAllowedDirectories(parentDir, allowed)).toBe(true);\n    });\n\n    it('demonstrates symlink race condition allows writing outside allowed directories', async () => {\n      const symlinkSupported = await getSymlinkSupport();\n      if (!symlinkSupported) {\n        console.log('   ⏭️  Skipping symlink race condition test - symlinks not supported');\n        return;\n      }\n\n      const allowed = [allowedDir];\n\n      await expect(fs.access(testPath)).rejects.toThrow();\n      expect(isPathWithinAllowedDirectories(testPath, allowed)).toBe(true);\n\n      await fs.symlink(targetFile, testPath);\n      await fs.writeFile(testPath, 'MODIFIED CONTENT', 'utf-8');\n\n      const targetContent = await fs.readFile(targetFile, 'utf-8');\n      expect(targetContent).toBe('MODIFIED CONTENT');\n\n      const resolvedPath = await fs.realpath(testPath);\n      expect(isPathWithinAllowedDirectories(resolvedPath, allowed)).toBe(false);\n    });\n\n    it('shows timing differences between validation approaches', async () => {\n      const symlinkSupported = await getSymlinkSupport();\n      if (!symlinkSupported) {\n        console.log('   ⏭️  Skipping timing validation test - symlinks not supported');\n        return;\n      }\n\n      const allowed = [allowedDir];\n\n      const validation1 = isPathWithinAllowedDirectories(testPath, allowed);\n      expect(validation1).toBe(true);\n\n      await fs.symlink(targetFile, testPath);\n\n      const resolvedPath = await fs.realpath(testPath);\n      const validation2 = isPathWithinAllowedDirectories(resolvedPath, allowed);\n      expect(validation2).toBe(false);\n\n      expect(validation1).not.toBe(validation2);\n    });\n\n    it('validates directory creation timing', async () => {\n      const symlinkSupported = await 
getSymlinkSupport();\n      if (!symlinkSupported) {\n        console.log('   ⏭️  Skipping directory creation timing test - symlinks not supported');\n        return;\n      }\n\n      const allowed = [allowedDir];\n      const testDir = path.join(allowedDir, 'newdir');\n\n      expect(isPathWithinAllowedDirectories(testDir, allowed)).toBe(true);\n\n      await fs.symlink(forbiddenDir, testDir);\n\n      expect(isPathWithinAllowedDirectories(testDir, allowed)).toBe(true);\n\n      const resolved = await fs.realpath(testDir);\n      expect(isPathWithinAllowedDirectories(resolved, allowed)).toBe(false);\n    });\n\n    it('demonstrates exclusive file creation behavior', async () => {\n      const symlinkSupported = await getSymlinkSupport();\n      if (!symlinkSupported) {\n        console.log('   ⏭️  Skipping exclusive file creation test - symlinks not supported');\n        return;\n      }\n\n      const allowed = [allowedDir];\n\n      await fs.symlink(targetFile, testPath);\n\n      await expect(fs.open(testPath, 'wx')).rejects.toThrow(/EEXIST/);\n\n      await fs.writeFile(testPath, 'NEW CONTENT', 'utf-8');\n      const targetContent = await fs.readFile(targetFile, 'utf-8');\n      expect(targetContent).toBe('NEW CONTENT');\n    });\n\n    it('should use resolved parent paths for non-existent files', async () => {\n      const symlinkSupported = await getSymlinkSupport();\n      if (!symlinkSupported) {\n        console.log('   ⏭️  Skipping resolved parent paths test - symlinks not supported');\n        return;\n      }\n\n      const allowed = [allowedDir];\n\n      const symlinkDir = path.join(allowedDir, 'link');\n      await fs.symlink(forbiddenDir, symlinkDir);\n\n      const fileThroughSymlink = path.join(symlinkDir, 'newfile.txt');\n\n      expect(fileThroughSymlink.startsWith(allowedDir)).toBe(true);\n\n      const parentDir = path.dirname(fileThroughSymlink);\n      const resolvedParent = await fs.realpath(parentDir);\n      
expect(isPathWithinAllowedDirectories(resolvedParent, allowed)).toBe(false);\n\n      const expectedSafePath = path.join(resolvedParent, path.basename(fileThroughSymlink));\n      expect(isPathWithinAllowedDirectories(expectedSafePath, allowed)).toBe(false);\n    });\n\n    it('demonstrates parent directory symlink traversal', async () => {\n      const symlinkSupported = await getSymlinkSupport();\n      if (!symlinkSupported) {\n        console.log('   ⏭️  Skipping parent directory symlink traversal test - symlinks not supported');\n        return;\n      }\n\n      const allowed = [allowedDir];\n      const deepPath = path.join(allowedDir, 'sub1', 'sub2', 'file.txt');\n\n      expect(isPathWithinAllowedDirectories(deepPath, allowed)).toBe(true);\n\n      const sub1Path = path.join(allowedDir, 'sub1');\n      await fs.symlink(forbiddenDir, sub1Path);\n\n      await fs.mkdir(path.join(sub1Path, 'sub2'), { recursive: true });\n      await fs.writeFile(deepPath, 'CONTENT', 'utf-8');\n\n      const realPath = await fs.realpath(deepPath);\n      const realAllowedDir = await fs.realpath(allowedDir);\n      const realForbiddenDir = await fs.realpath(forbiddenDir);\n\n      expect(realPath.startsWith(realAllowedDir)).toBe(false);\n      expect(realPath.startsWith(realForbiddenDir)).toBe(true);\n    });\n\n    it('should prevent race condition between validatePath and file operation', async () => {\n      const symlinkSupported = await getSymlinkSupport();\n      if (!symlinkSupported) {\n        console.log('   ⏭️  Skipping race condition prevention test - symlinks not supported');\n        return;\n      }\n\n      const allowed = [allowedDir];\n      const racePath = path.join(allowedDir, 'race-file.txt');\n      const targetFile = path.join(forbiddenDir, 'target.txt');\n\n      await fs.writeFile(targetFile, 'ORIGINAL CONTENT', 'utf-8');\n\n      // Path validation would pass (file doesn't exist, parent is in allowed dir)\n      expect(await 
fs.access(racePath).then(() => false).catch(() => true)).toBe(true);\n      expect(isPathWithinAllowedDirectories(racePath, allowed)).toBe(true);\n\n      // Race condition: symlink created after validation but before write\n      await fs.symlink(targetFile, racePath);\n\n      // With exclusive write flag, write should fail on symlink\n      await expect(\n        fs.writeFile(racePath, 'NEW CONTENT', { encoding: 'utf-8', flag: 'wx' })\n      ).rejects.toThrow(/EEXIST/);\n\n      // Verify content unchanged\n      const targetContent = await fs.readFile(targetFile, 'utf-8');\n      expect(targetContent).toBe('ORIGINAL CONTENT');\n\n      // The symlink exists but write was blocked\n      const actualWritePath = await fs.realpath(racePath);\n      expect(actualWritePath).toBe(await fs.realpath(targetFile));\n      expect(isPathWithinAllowedDirectories(actualWritePath, allowed)).toBe(false);\n    });\n\n    it('should allow overwrites to legitimate files within allowed directories', async () => {\n      const allowed = [allowedDir];\n      const legitFile = path.join(allowedDir, 'legit-file.txt');\n\n      // Create a legitimate file\n      await fs.writeFile(legitFile, 'ORIGINAL', 'utf-8');\n\n      // Opening with w should work for legitimate files\n      const fd = await fs.open(legitFile, 'w');\n      try {\n        await fd.write('UPDATED', 0, 'utf-8');\n      } finally {\n        await fd.close();\n      }\n\n      const content = await fs.readFile(legitFile, 'utf-8');\n      expect(content).toBe('UPDATED');\n    });\n\n    it('should handle symlinks that point within allowed directories', async () => {\n      const symlinkSupported = await getSymlinkSupport();\n      if (!symlinkSupported) {\n        console.log('   ⏭️  Skipping symlinks within allowed directories test - symlinks not supported');\n        return;\n      }\n\n      const allowed = [allowedDir];\n      const targetFile = path.join(allowedDir, 'target.txt');\n      const symlinkPath = 
path.join(allowedDir, 'symlink.txt');\n\n      // Create target file within allowed directory\n      await fs.writeFile(targetFile, 'TARGET CONTENT', 'utf-8');\n\n      // Create symlink pointing to allowed file\n      await fs.symlink(targetFile, symlinkPath);\n\n      // Opening symlink with w follows it to the target\n      const fd = await fs.open(symlinkPath, 'w');\n      try {\n        await fd.write('UPDATED VIA SYMLINK', 0, 'utf-8');\n      } finally {\n        await fd.close();\n      }\n\n      // Both symlink and target should show updated content\n      const symlinkContent = await fs.readFile(symlinkPath, 'utf-8');\n      const targetContent = await fs.readFile(targetFile, 'utf-8');\n      expect(symlinkContent).toBe('UPDATED VIA SYMLINK');\n      expect(targetContent).toBe('UPDATED VIA SYMLINK');\n    });\n\n    it('should prevent overwriting files through symlinks pointing outside allowed directories', async () => {\n      const symlinkSupported = await getSymlinkSupport();\n      if (!symlinkSupported) {\n        console.log('   ⏭️  Skipping symlink overwrite prevention test - symlinks not supported');\n        return;\n      }\n\n      const allowed = [allowedDir];\n      const legitFile = path.join(allowedDir, 'existing.txt');\n      const targetFile = path.join(forbiddenDir, 'target.txt');\n\n      // Create a legitimate file first\n      await fs.writeFile(legitFile, 'LEGIT CONTENT', 'utf-8');\n\n      // Create target file in forbidden directory\n      await fs.writeFile(targetFile, 'FORBIDDEN CONTENT', 'utf-8');\n\n      // Now replace the legitimate file with a symlink to forbidden location\n      await fs.unlink(legitFile);\n      await fs.symlink(targetFile, legitFile);\n\n      // Simulate the server's validation logic\n      const stats = await fs.lstat(legitFile);\n      expect(stats.isSymbolicLink()).toBe(true);\n\n      const realPath = await fs.realpath(legitFile);\n      expect(isPathWithinAllowedDirectories(realPath, 
allowed)).toBe(false);\n\n      // With atomic rename, symlinks are replaced not followed\n      // So this test now demonstrates the protection\n\n      // Verify content remains unchanged\n      const targetContent = await fs.readFile(targetFile, 'utf-8');\n      expect(targetContent).toBe('FORBIDDEN CONTENT');\n    });\n\n    it('demonstrates race condition in read operations', async () => {\n      const symlinkSupported = await getSymlinkSupport();\n      if (!symlinkSupported) {\n        console.log('   ⏭️  Skipping race condition in read operations test - symlinks not supported');\n        return;\n      }\n\n      const allowed = [allowedDir];\n      const legitFile = path.join(allowedDir, 'readable.txt');\n      const secretFile = path.join(forbiddenDir, 'secret.txt');\n\n      // Create legitimate file\n      await fs.writeFile(legitFile, 'PUBLIC CONTENT', 'utf-8');\n\n      // Create secret file in forbidden directory\n      await fs.writeFile(secretFile, 'SECRET CONTENT', 'utf-8');\n\n      // Step 1: validatePath would pass for legitimate file\n      expect(isPathWithinAllowedDirectories(legitFile, allowed)).toBe(true);\n\n      // Step 2: Race condition - replace file with symlink after validation\n      await fs.unlink(legitFile);\n      await fs.symlink(secretFile, legitFile);\n\n      // Step 3: Read operation follows symlink to forbidden location\n      const content = await fs.readFile(legitFile, 'utf-8');\n\n      // This shows the vulnerability - we read forbidden content\n      expect(content).toBe('SECRET CONTENT');\n      expect(isPathWithinAllowedDirectories(await fs.realpath(legitFile), allowed)).toBe(false);\n    });\n\n    it('verifies rename does not follow symlinks', async () => {\n      const symlinkSupported = await getSymlinkSupport();\n      if (!symlinkSupported) {\n        console.log('   ⏭️  Skipping rename symlink test - symlinks not supported');\n        return;\n      }\n\n      const allowed = [allowedDir];\n      const 
tempFile = path.join(allowedDir, 'temp.txt');\n      const targetSymlink = path.join(allowedDir, 'target-symlink.txt');\n      const forbiddenTarget = path.join(forbiddenDir, 'forbidden-target.txt');\n\n      // Create forbidden target\n      await fs.writeFile(forbiddenTarget, 'ORIGINAL CONTENT', 'utf-8');\n\n      // Create symlink pointing to forbidden location\n      await fs.symlink(forbiddenTarget, targetSymlink);\n\n      // Write temp file\n      await fs.writeFile(tempFile, 'NEW CONTENT', 'utf-8');\n\n      // Rename temp file to symlink path\n      await fs.rename(tempFile, targetSymlink);\n\n      // Check what happened\n      const symlinkExists = await fs.lstat(targetSymlink).then(() => true).catch(() => false);\n      const isSymlink = symlinkExists && (await fs.lstat(targetSymlink)).isSymbolicLink();\n      const targetContent = await fs.readFile(targetSymlink, 'utf-8');\n      const forbiddenContent = await fs.readFile(forbiddenTarget, 'utf-8');\n\n      // Rename should replace the symlink with a regular file\n      expect(isSymlink).toBe(false);\n      expect(targetContent).toBe('NEW CONTENT');\n      expect(forbiddenContent).toBe('ORIGINAL CONTENT'); // Unchanged\n    });\n  });\n});\n"
  },
  {
    "path": "src/filesystem/__tests__/roots-utils.test.ts",
    "content": "import { describe, it, expect, beforeEach, afterEach } from 'vitest';\nimport { getValidRootDirectories } from '../roots-utils.js';\nimport { mkdtempSync, rmSync, mkdirSync, writeFileSync, realpathSync } from 'fs';\nimport { tmpdir } from 'os';\nimport { join } from 'path';\nimport type { Root } from '@modelcontextprotocol/sdk/types.js';\n\ndescribe('getValidRootDirectories', () => {\n  let testDir1: string;\n  let testDir2: string;\n  let testDir3: string;\n  let testFile: string;\n\n  beforeEach(() => {\n    // Create test directories\n    testDir1 = realpathSync(mkdtempSync(join(tmpdir(), 'mcp-roots-test1-')));\n    testDir2 = realpathSync(mkdtempSync(join(tmpdir(), 'mcp-roots-test2-')));\n    testDir3 = realpathSync(mkdtempSync(join(tmpdir(), 'mcp-roots-test3-')));\n\n    // Create a test file (not a directory)\n    testFile = join(testDir1, 'test-file.txt');\n    writeFileSync(testFile, 'test content');\n  });\n\n  afterEach(() => {\n    // Cleanup\n    rmSync(testDir1, { recursive: true, force: true });\n    rmSync(testDir2, { recursive: true, force: true });\n    rmSync(testDir3, { recursive: true, force: true });\n  });\n\n  describe('valid directory processing', () => {\n    it('should process all URI formats and edge cases', async () => {\n      const roots = [\n        { uri: `file://${testDir1}`, name: 'File URI' },\n        { uri: testDir2, name: 'Plain path' },\n        { uri: testDir3 } // Plain path without name property\n      ];\n\n      const result = await getValidRootDirectories(roots);\n\n      expect(result).toContain(testDir1);\n      expect(result).toContain(testDir2);\n      expect(result).toContain(testDir3);\n      expect(result).toHaveLength(3);\n    });\n\n    it('should normalize complex paths', async () => {\n      const subDir = join(testDir1, 'subdir');\n      mkdirSync(subDir);\n      \n      const roots = [\n        { uri: `file://${testDir1}/./subdir/../subdir`, name: 'Complex Path' }\n      ];\n\n      const 
result = await getValidRootDirectories(roots);\n\n      expect(result).toHaveLength(1);\n      expect(result[0]).toBe(subDir);\n    });\n  });\n\n  describe('error handling', () => {\n\n    it('should handle various error types', async () => {\n      const nonExistentDir = join(tmpdir(), 'non-existent-directory-12345');\n      const invalidPath = '\\0invalid\\0path'; // Null bytes cause different error types\n      const roots = [\n        { uri: `file://${testDir1}`, name: 'Valid Dir' },\n        { uri: `file://${nonExistentDir}`, name: 'Non-existent Dir' },\n        { uri: `file://${testFile}`, name: 'File Not Dir' },\n        { uri: `file://${invalidPath}`, name: 'Invalid Path' }\n      ];\n\n      const result = await getValidRootDirectories(roots);\n\n      expect(result).toContain(testDir1);\n      expect(result).not.toContain(nonExistentDir);\n      expect(result).not.toContain(testFile);\n      expect(result).not.toContain(invalidPath);\n      expect(result).toHaveLength(1);\n    });\n  });\n});"
  },
  {
    "path": "src/filesystem/__tests__/startup-validation.test.ts",
    "content": "import { describe, it, expect, beforeEach, afterEach } from 'vitest';\nimport { spawn } from 'child_process';\nimport * as path from 'path';\nimport * as fs from 'fs/promises';\nimport * as os from 'os';\n\nconst SERVER_PATH = path.join(__dirname, '..', 'dist', 'index.js');\n\n/**\n * Spawns the filesystem server with given arguments and returns exit info\n */\nasync function spawnServer(args: string[], timeoutMs = 2000): Promise<{ exitCode: number | null; stderr: string }> {\n  return new Promise((resolve) => {\n    const proc = spawn('node', [SERVER_PATH, ...args], {\n      stdio: ['pipe', 'pipe', 'pipe'],\n    });\n\n    let stderr = '';\n    proc.stderr?.on('data', (data) => {\n      stderr += data.toString();\n    });\n\n    const timeout = setTimeout(() => {\n      proc.kill('SIGTERM');\n    }, timeoutMs);\n\n    proc.on('close', (code) => {\n      clearTimeout(timeout);\n      resolve({ exitCode: code, stderr });\n    });\n\n    proc.on('error', (err) => {\n      clearTimeout(timeout);\n      resolve({ exitCode: 1, stderr: err.message });\n    });\n  });\n}\n\ndescribe('Startup Directory Validation', () => {\n  let testDir: string;\n  let accessibleDir: string;\n  let accessibleDir2: string;\n\n  beforeEach(async () => {\n    testDir = await fs.mkdtemp(path.join(os.tmpdir(), 'fs-startup-test-'));\n    accessibleDir = path.join(testDir, 'accessible');\n    accessibleDir2 = path.join(testDir, 'accessible2');\n    await fs.mkdir(accessibleDir, { recursive: true });\n    await fs.mkdir(accessibleDir2, { recursive: true });\n  });\n\n  afterEach(async () => {\n    await fs.rm(testDir, { recursive: true, force: true });\n  });\n\n  it('should start successfully with all accessible directories', async () => {\n    const result = await spawnServer([accessibleDir, accessibleDir2]);\n    // Server starts and runs (we kill it after timeout, so exit code is null or from SIGTERM)\n    expect(result.stderr).toContain('Secure MCP Filesystem Server running 
on stdio');\n    expect(result.stderr).not.toContain('Error:');\n  });\n\n  it('should skip inaccessible directory and continue with accessible one', async () => {\n    const nonExistentDir = path.join(testDir, 'non-existent-dir-12345');\n\n    const result = await spawnServer([nonExistentDir, accessibleDir]);\n\n    // Should warn about inaccessible directory\n    expect(result.stderr).toContain('Warning: Cannot access directory');\n    expect(result.stderr).toContain(nonExistentDir);\n\n    // Should still start successfully\n    expect(result.stderr).toContain('Secure MCP Filesystem Server running on stdio');\n  });\n\n  it('should exit with error when ALL directories are inaccessible', async () => {\n    const nonExistent1 = path.join(testDir, 'non-existent-1');\n    const nonExistent2 = path.join(testDir, 'non-existent-2');\n\n    const result = await spawnServer([nonExistent1, nonExistent2]);\n\n    // Should exit with error\n    expect(result.exitCode).toBe(1);\n    expect(result.stderr).toContain('Error: None of the specified directories are accessible');\n  });\n\n  it('should warn when path is not a directory', async () => {\n    const filePath = path.join(testDir, 'not-a-directory.txt');\n    await fs.writeFile(filePath, 'content');\n\n    const result = await spawnServer([filePath, accessibleDir]);\n\n    // Should warn about non-directory\n    expect(result.stderr).toContain('Warning:');\n    expect(result.stderr).toContain('not a directory');\n\n    // Should still start with the valid directory\n    expect(result.stderr).toContain('Secure MCP Filesystem Server running on stdio');\n  });\n});\n"
  },
  {
    "path": "src/filesystem/__tests__/structured-content.test.ts",
    "content": "import { describe, it, expect, beforeEach, afterEach } from 'vitest';\nimport * as fs from 'fs/promises';\nimport * as path from 'path';\nimport * as os from 'os';\nimport { Client } from '@modelcontextprotocol/sdk/client/index.js';\nimport { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';\nimport { spawn } from 'child_process';\n\n/**\n * Integration tests to verify that tool handlers return structuredContent\n * that matches the declared outputSchema.\n *\n * These tests address issues #3110, #3106, #3093 where tools were returning\n * structuredContent: { content: [contentBlock] } (array) instead of\n * structuredContent: { content: string } as declared in outputSchema.\n */\ndescribe('structuredContent schema compliance', () => {\n  let client: Client;\n  let transport: StdioClientTransport;\n  let testDir: string;\n\n  beforeEach(async () => {\n    // Create a temp directory for testing\n    testDir = await fs.mkdtemp(path.join(os.tmpdir(), 'mcp-fs-test-'));\n\n    // Create test files\n    await fs.writeFile(path.join(testDir, 'test.txt'), 'test content');\n    await fs.mkdir(path.join(testDir, 'subdir'));\n    await fs.writeFile(path.join(testDir, 'subdir', 'nested.txt'), 'nested content');\n\n    // Start the MCP server\n    const serverPath = path.resolve(__dirname, '../dist/index.js');\n    transport = new StdioClientTransport({\n      command: 'node',\n      args: [serverPath, testDir],\n    });\n\n    client = new Client({\n      name: 'test-client',\n      version: '1.0.0',\n    }, {\n      capabilities: {}\n    });\n\n    await client.connect(transport);\n  });\n\n  afterEach(async () => {\n    await client?.close();\n    await fs.rm(testDir, { recursive: true, force: true });\n  });\n\n  describe('directory_tree', () => {\n    it('should return structuredContent.content as a string, not an array', async () => {\n      const result = await client.callTool({\n        name: 'directory_tree',\n        arguments: { 
path: testDir }\n      });\n\n      // The result should have structuredContent\n      expect(result.structuredContent).toBeDefined();\n\n      // structuredContent.content should be a string (matching outputSchema: { content: z.string() })\n      const structuredContent = result.structuredContent as { content: unknown };\n      expect(typeof structuredContent.content).toBe('string');\n\n      // It should NOT be an array\n      expect(Array.isArray(structuredContent.content)).toBe(false);\n\n      // The content should be valid JSON representing the tree\n      const treeData = JSON.parse(structuredContent.content as string);\n      expect(Array.isArray(treeData)).toBe(true);\n    });\n  });\n\n  describe('list_directory_with_sizes', () => {\n    it('should return structuredContent.content as a string, not an array', async () => {\n      const result = await client.callTool({\n        name: 'list_directory_with_sizes',\n        arguments: { path: testDir }\n      });\n\n      // The result should have structuredContent\n      expect(result.structuredContent).toBeDefined();\n\n      // structuredContent.content should be a string (matching outputSchema: { content: z.string() })\n      const structuredContent = result.structuredContent as { content: unknown };\n      expect(typeof structuredContent.content).toBe('string');\n\n      // It should NOT be an array\n      expect(Array.isArray(structuredContent.content)).toBe(false);\n\n      // The content should contain directory listing info\n      expect(structuredContent.content).toContain('[FILE]');\n    });\n  });\n\n  describe('move_file', () => {\n    it('should return structuredContent.content as a string, not an array', async () => {\n      const sourcePath = path.join(testDir, 'test.txt');\n      const destPath = path.join(testDir, 'moved.txt');\n\n      const result = await client.callTool({\n        name: 'move_file',\n        arguments: {\n          source: sourcePath,\n          destination: destPath\n     
   }\n      });\n\n      // The result should have structuredContent\n      expect(result.structuredContent).toBeDefined();\n\n      // structuredContent.content should be a string (matching outputSchema: { content: z.string() })\n      const structuredContent = result.structuredContent as { content: unknown };\n      expect(typeof structuredContent.content).toBe('string');\n\n      // It should NOT be an array\n      expect(Array.isArray(structuredContent.content)).toBe(false);\n\n      // The content should contain success message\n      expect(structuredContent.content).toContain('Successfully moved');\n    });\n  });\n\n  describe('list_directory (control - already working)', () => {\n    it('should return structuredContent.content as a string', async () => {\n      const result = await client.callTool({\n        name: 'list_directory',\n        arguments: { path: testDir }\n      });\n\n      expect(result.structuredContent).toBeDefined();\n\n      const structuredContent = result.structuredContent as { content: unknown };\n      expect(typeof structuredContent.content).toBe('string');\n      expect(Array.isArray(structuredContent.content)).toBe(false);\n    });\n  });\n\n  describe('search_files (control - already working)', () => {\n    it('should return structuredContent.content as a string', async () => {\n      const result = await client.callTool({\n        name: 'search_files',\n        arguments: {\n          path: testDir,\n          pattern: '*.txt'\n        }\n      });\n\n      expect(result.structuredContent).toBeDefined();\n\n      const structuredContent = result.structuredContent as { content: unknown };\n      expect(typeof structuredContent.content).toBe('string');\n      expect(Array.isArray(structuredContent.content)).toBe(false);\n    });\n  });\n});\n"
  },
  {
    "path": "src/filesystem/index.ts",
    "content": "#!/usr/bin/env node\n\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { StdioServerTransport } from \"@modelcontextprotocol/sdk/server/stdio.js\";\nimport {\n  CallToolResult,\n  RootsListChangedNotificationSchema,\n  type Root,\n} from \"@modelcontextprotocol/sdk/types.js\";\nimport fs from \"fs/promises\";\nimport { createReadStream } from \"fs\";\nimport path from \"path\";\nimport { z } from \"zod\";\nimport { minimatch } from \"minimatch\";\nimport { normalizePath, expandHome } from './path-utils.js';\nimport { getValidRootDirectories } from './roots-utils.js';\nimport {\n  // Function imports\n  formatSize,\n  validatePath,\n  getFileStats,\n  readFileContent,\n  writeFileContent,\n  searchFilesWithValidation,\n  applyFileEdits,\n  tailFile,\n  headFile,\n  setAllowedDirectories,\n} from './lib.js';\n\n// Command line argument parsing\nconst args = process.argv.slice(2);\nif (args.length === 0) {\n  console.error(\"Usage: mcp-server-filesystem [allowed-directory] [additional-directories...]\");\n  console.error(\"Note: Allowed directories can be provided via:\");\n  console.error(\"  1. Command-line arguments (shown above)\");\n  console.error(\"  2. 
MCP roots protocol (if client supports it)\");\n  console.error(\"At least one directory must be provided by EITHER method for the server to operate.\");\n}\n\n// Store allowed directories in normalized and resolved form\n// We store BOTH the original path AND the resolved path to handle symlinks correctly\n// This fixes the macOS /tmp -> /private/tmp symlink issue where users specify /tmp\n// but the resolved path is /private/tmp\nlet allowedDirectories = (await Promise.all(\n  args.map(async (dir) => {\n    const expanded = expandHome(dir);\n    const absolute = path.resolve(expanded);\n    const normalizedOriginal = normalizePath(absolute);\n    try {\n      // Security: Resolve symlinks in allowed directories during startup\n      // This ensures we know the real paths and can validate against them later\n      const resolved = await fs.realpath(absolute);\n      const normalizedResolved = normalizePath(resolved);\n      // Return both original and resolved paths if they differ\n      // This allows matching against either /tmp or /private/tmp on macOS\n      if (normalizedOriginal !== normalizedResolved) {\n        return [normalizedOriginal, normalizedResolved];\n      }\n      return [normalizedResolved];\n    } catch (error) {\n      // If we can't resolve (doesn't exist), use the normalized absolute path\n      // This allows configuring allowed dirs that will be created later\n      return [normalizedOriginal];\n    }\n  })\n)).flat();\n\n// Filter to only accessible directories, warn about inaccessible ones\nconst accessibleDirectories: string[] = [];\nfor (const dir of allowedDirectories) {\n  try {\n    const stats = await fs.stat(dir);\n    if (stats.isDirectory()) {\n      accessibleDirectories.push(dir);\n    } else {\n      console.error(`Warning: ${dir} is not a directory, skipping`);\n    }\n  } catch (error) {\n    console.error(`Warning: Cannot access directory ${dir}, skipping`);\n  }\n}\n\n// Exit only if ALL paths are inaccessible (and some 
were specified)\nif (accessibleDirectories.length === 0 && allowedDirectories.length > 0) {\n  console.error(\"Error: None of the specified directories are accessible\");\n  process.exit(1);\n}\n\nallowedDirectories = accessibleDirectories;\n\n// Initialize the global allowedDirectories in lib.ts\nsetAllowedDirectories(allowedDirectories);\n\n// Schema definitions\nconst ReadTextFileArgsSchema = z.object({\n  path: z.string(),\n  tail: z.number().optional().describe('If provided, returns only the last N lines of the file'),\n  head: z.number().optional().describe('If provided, returns only the first N lines of the file')\n});\n\nconst ReadMediaFileArgsSchema = z.object({\n  path: z.string()\n});\n\nconst ReadMultipleFilesArgsSchema = z.object({\n  paths: z\n    .array(z.string())\n    .min(1, \"At least one file path must be provided\")\n    .describe(\"Array of file paths to read. Each path must be a string pointing to a valid file within allowed directories.\"),\n});\n\nconst WriteFileArgsSchema = z.object({\n  path: z.string(),\n  content: z.string(),\n});\n\nconst EditOperation = z.object({\n  oldText: z.string().describe('Text to search for - must match exactly'),\n  newText: z.string().describe('Text to replace with')\n});\n\nconst EditFileArgsSchema = z.object({\n  path: z.string(),\n  edits: z.array(EditOperation),\n  dryRun: z.boolean().default(false).describe('Preview changes using git-style diff format')\n});\n\nconst CreateDirectoryArgsSchema = z.object({\n  path: z.string(),\n});\n\nconst ListDirectoryArgsSchema = z.object({\n  path: z.string(),\n});\n\nconst ListDirectoryWithSizesArgsSchema = z.object({\n  path: z.string(),\n  sortBy: z.enum(['name', 'size']).optional().default('name').describe('Sort entries by name or size'),\n});\n\nconst DirectoryTreeArgsSchema = z.object({\n  path: z.string(),\n  excludePatterns: z.array(z.string()).optional().default([])\n});\n\nconst MoveFileArgsSchema = z.object({\n  source: z.string(),\n  destination: 
z.string(),\n});\n\nconst SearchFilesArgsSchema = z.object({\n  path: z.string(),\n  pattern: z.string(),\n  excludePatterns: z.array(z.string()).optional().default([])\n});\n\nconst GetFileInfoArgsSchema = z.object({\n  path: z.string(),\n});\n\n// Server setup\nconst server = new McpServer(\n  {\n    name: \"secure-filesystem-server\",\n    version: \"0.2.0\",\n  }\n);\n\n// Reads a file as a stream of buffers, concatenates them, and then encodes\n// the result to a Base64 string. This is a memory-efficient way to handle\n// binary data from a stream before the final encoding.\nasync function readFileAsBase64Stream(filePath: string): Promise<string> {\n  return new Promise((resolve, reject) => {\n    const stream = createReadStream(filePath);\n    const chunks: Buffer[] = [];\n    stream.on('data', (chunk) => {\n      chunks.push(chunk as Buffer);\n    });\n    stream.on('end', () => {\n      const finalBuffer = Buffer.concat(chunks);\n      resolve(finalBuffer.toString('base64'));\n    });\n    stream.on('error', (err) => reject(err));\n  });\n}\n\n// Tool registrations\n\n// read_file (deprecated) and read_text_file\nconst readTextFileHandler = async (args: z.infer<typeof ReadTextFileArgsSchema>) => {\n  const validPath = await validatePath(args.path);\n\n  if (args.head && args.tail) {\n    throw new Error(\"Cannot specify both head and tail parameters simultaneously\");\n  }\n\n  let content: string;\n  if (args.tail) {\n    content = await tailFile(validPath, args.tail);\n  } else if (args.head) {\n    content = await headFile(validPath, args.head);\n  } else {\n    content = await readFileContent(validPath);\n  }\n\n  return {\n    content: [{ type: \"text\" as const, text: content }],\n    structuredContent: { content }\n  };\n};\n\nserver.registerTool(\n  \"read_file\",\n  {\n    title: \"Read File (Deprecated)\",\n    description: \"Read the complete contents of a file as text. 
DEPRECATED: Use read_text_file instead.\",\n    inputSchema: ReadTextFileArgsSchema.shape,\n    outputSchema: { content: z.string() },\n    annotations: { readOnlyHint: true }\n  },\n  readTextFileHandler\n);\n\nserver.registerTool(\n  \"read_text_file\",\n  {\n    title: \"Read Text File\",\n    description:\n      \"Read the complete contents of a file from the file system as text. \" +\n      \"Handles various text encodings and provides detailed error messages \" +\n      \"if the file cannot be read. Use this tool when you need to examine \" +\n      \"the contents of a single file. Use the 'head' parameter to read only \" +\n      \"the first N lines of a file, or the 'tail' parameter to read only \" +\n      \"the last N lines of a file. Operates on the file as text regardless of extension. \" +\n      \"Only works within allowed directories.\",\n    inputSchema: {\n      path: z.string(),\n      tail: z.number().optional().describe(\"If provided, returns only the last N lines of the file\"),\n      head: z.number().optional().describe(\"If provided, returns only the first N lines of the file\")\n    },\n    outputSchema: { content: z.string() },\n    annotations: { readOnlyHint: true }\n  },\n  readTextFileHandler\n);\n\nserver.registerTool(\n  \"read_media_file\",\n  {\n    title: \"Read Media File\",\n    description:\n      \"Read an image or audio file. Returns the base64 encoded data and MIME type. 
\" +\n      \"Only works within allowed directories.\",\n    inputSchema: {\n      path: z.string()\n    },\n    outputSchema: {\n      content: z.array(z.object({\n        type: z.enum([\"image\", \"audio\", \"blob\"]),\n        data: z.string(),\n        mimeType: z.string()\n      }))\n    },\n    annotations: { readOnlyHint: true }\n  },\n  async (args: z.infer<typeof ReadMediaFileArgsSchema>) => {\n    const validPath = await validatePath(args.path);\n    const extension = path.extname(validPath).toLowerCase();\n    const mimeTypes: Record<string, string> = {\n      \".png\": \"image/png\",\n      \".jpg\": \"image/jpeg\",\n      \".jpeg\": \"image/jpeg\",\n      \".gif\": \"image/gif\",\n      \".webp\": \"image/webp\",\n      \".bmp\": \"image/bmp\",\n      \".svg\": \"image/svg+xml\",\n      \".mp3\": \"audio/mpeg\",\n      \".wav\": \"audio/wav\",\n      \".ogg\": \"audio/ogg\",\n      \".flac\": \"audio/flac\",\n    };\n    const mimeType = mimeTypes[extension] || \"application/octet-stream\";\n    const data = await readFileAsBase64Stream(validPath);\n\n    const type = mimeType.startsWith(\"image/\")\n      ? \"image\"\n      : mimeType.startsWith(\"audio/\")\n        ? \"audio\"\n        // Fallback for other binary types, not officially supported by the spec but has been used for some time\n        : \"blob\";\n    const contentItem = { type: type as 'image' | 'audio' | 'blob', data, mimeType };\n    return {\n      content: [contentItem],\n      structuredContent: { content: [contentItem] }\n    } as unknown as CallToolResult;\n  }\n);\n\nserver.registerTool(\n  \"read_multiple_files\",\n  {\n    title: \"Read Multiple Files\",\n    description:\n      \"Read the contents of multiple files simultaneously. This is more \" +\n      \"efficient than reading files one by one when you need to analyze \" +\n      \"or compare multiple files. Each file's content is returned with its \" +\n      \"path as a reference. 
Failed reads for individual files won't stop \" +\n      \"the entire operation. Only works within allowed directories.\",\n    inputSchema: {\n      paths: z.array(z.string())\n        .min(1)\n        .describe(\"Array of file paths to read. Each path must be a string pointing to a valid file within allowed directories.\")\n    },\n    outputSchema: { content: z.string() },\n    annotations: { readOnlyHint: true }\n  },\n  async (args: z.infer<typeof ReadMultipleFilesArgsSchema>) => {\n    const results = await Promise.all(\n      args.paths.map(async (filePath: string) => {\n        try {\n          const validPath = await validatePath(filePath);\n          const content = await readFileContent(validPath);\n          return `${filePath}:\\n${content}\\n`;\n        } catch (error) {\n          const errorMessage = error instanceof Error ? error.message : String(error);\n          return `${filePath}: Error - ${errorMessage}`;\n        }\n      }),\n    );\n    const text = results.join(\"\\n---\\n\");\n    return {\n      content: [{ type: \"text\" as const, text }],\n      structuredContent: { content: text }\n    };\n  }\n);\n\nserver.registerTool(\n  \"write_file\",\n  {\n    title: \"Write File\",\n    description:\n      \"Create a new file or completely overwrite an existing file with new content. \" +\n      \"Use with caution as it will overwrite existing files without warning. \" +\n      \"Handles text content with proper encoding. 
Only works within allowed directories.\",\n    inputSchema: {\n      path: z.string(),\n      content: z.string()\n    },\n    outputSchema: { content: z.string() },\n    annotations: { readOnlyHint: false, idempotentHint: true, destructiveHint: true }\n  },\n  async (args: z.infer<typeof WriteFileArgsSchema>) => {\n    const validPath = await validatePath(args.path);\n    await writeFileContent(validPath, args.content);\n    const text = `Successfully wrote to ${args.path}`;\n    return {\n      content: [{ type: \"text\" as const, text }],\n      structuredContent: { content: text }\n    };\n  }\n);\n\nserver.registerTool(\n  \"edit_file\",\n  {\n    title: \"Edit File\",\n    description:\n      \"Make line-based edits to a text file. Each edit replaces exact line sequences \" +\n      \"with new content. Returns a git-style diff showing the changes made. \" +\n      \"Only works within allowed directories.\",\n    inputSchema: {\n      path: z.string(),\n      edits: z.array(z.object({\n        oldText: z.string().describe(\"Text to search for - must match exactly\"),\n        newText: z.string().describe(\"Text to replace with\")\n      })),\n      dryRun: z.boolean().default(false).describe(\"Preview changes using git-style diff format\")\n    },\n    outputSchema: { content: z.string() },\n    annotations: { readOnlyHint: false, idempotentHint: false, destructiveHint: true }\n  },\n  async (args: z.infer<typeof EditFileArgsSchema>) => {\n    const validPath = await validatePath(args.path);\n    const result = await applyFileEdits(validPath, args.edits, args.dryRun);\n    return {\n      content: [{ type: \"text\" as const, text: result }],\n      structuredContent: { content: result }\n    };\n  }\n);\n\nserver.registerTool(\n  \"create_directory\",\n  {\n    title: \"Create Directory\",\n    description:\n      \"Create a new directory or ensure a directory exists. Can create multiple \" +\n      \"nested directories in one operation. 
If the directory already exists, \" +\n      \"this operation will succeed silently. Perfect for setting up directory \" +\n      \"structures for projects or ensuring required paths exist. Only works within allowed directories.\",\n    inputSchema: {\n      path: z.string()\n    },\n    outputSchema: { content: z.string() },\n    annotations: { readOnlyHint: false, idempotentHint: true, destructiveHint: false }\n  },\n  async (args: z.infer<typeof CreateDirectoryArgsSchema>) => {\n    const validPath = await validatePath(args.path);\n    await fs.mkdir(validPath, { recursive: true });\n    const text = `Successfully created directory ${args.path}`;\n    return {\n      content: [{ type: \"text\" as const, text }],\n      structuredContent: { content: text }\n    };\n  }\n);\n\nserver.registerTool(\n  \"list_directory\",\n  {\n    title: \"List Directory\",\n    description:\n      \"Get a detailed listing of all files and directories in a specified path. \" +\n      \"Results clearly distinguish between files and directories with [FILE] and [DIR] \" +\n      \"prefixes. This tool is essential for understanding directory structure and \" +\n      \"finding specific files within a directory. Only works within allowed directories.\",\n    inputSchema: {\n      path: z.string()\n    },\n    outputSchema: { content: z.string() },\n    annotations: { readOnlyHint: true }\n  },\n  async (args: z.infer<typeof ListDirectoryArgsSchema>) => {\n    const validPath = await validatePath(args.path);\n    const entries = await fs.readdir(validPath, { withFileTypes: true });\n    const formatted = entries\n      .map((entry) => `${entry.isDirectory() ? 
\"[DIR]\" : \"[FILE]\"} ${entry.name}`)\n      .join(\"\\n\");\n    return {\n      content: [{ type: \"text\" as const, text: formatted }],\n      structuredContent: { content: formatted }\n    };\n  }\n);\n\nserver.registerTool(\n  \"list_directory_with_sizes\",\n  {\n    title: \"List Directory with Sizes\",\n    description:\n      \"Get a detailed listing of all files and directories in a specified path, including sizes. \" +\n      \"Results clearly distinguish between files and directories with [FILE] and [DIR] \" +\n      \"prefixes. This tool is useful for understanding directory structure and \" +\n      \"finding specific files within a directory. Only works within allowed directories.\",\n    inputSchema: {\n      path: z.string(),\n      sortBy: z.enum([\"name\", \"size\"]).optional().default(\"name\").describe(\"Sort entries by name or size\")\n    },\n    outputSchema: { content: z.string() },\n    annotations: { readOnlyHint: true }\n  },\n  async (args: z.infer<typeof ListDirectoryWithSizesArgsSchema>) => {\n    const validPath = await validatePath(args.path);\n    const entries = await fs.readdir(validPath, { withFileTypes: true });\n\n    // Get detailed information for each entry\n    const detailedEntries = await Promise.all(\n      entries.map(async (entry) => {\n        const entryPath = path.join(validPath, entry.name);\n        try {\n          const stats = await fs.stat(entryPath);\n          return {\n            name: entry.name,\n            isDirectory: entry.isDirectory(),\n            size: stats.size,\n            mtime: stats.mtime\n          };\n        } catch (error) {\n          return {\n            name: entry.name,\n            isDirectory: entry.isDirectory(),\n            size: 0,\n            mtime: new Date(0)\n          };\n        }\n      })\n    );\n\n    // Sort entries based on sortBy parameter\n    const sortedEntries = [...detailedEntries].sort((a, b) => {\n      if (args.sortBy === 'size') {\n        return 
b.size - a.size; // Descending by size\n      }\n      // Default sort by name\n      return a.name.localeCompare(b.name);\n    });\n\n    // Format the output\n    const formattedEntries = sortedEntries.map(entry =>\n      `${entry.isDirectory ? \"[DIR]\" : \"[FILE]\"} ${entry.name.padEnd(30)} ${\n        entry.isDirectory ? \"\" : formatSize(entry.size).padStart(10)\n      }`\n    );\n\n    // Add summary\n    const totalFiles = detailedEntries.filter(e => !e.isDirectory).length;\n    const totalDirs = detailedEntries.filter(e => e.isDirectory).length;\n    const totalSize = detailedEntries.reduce((sum, entry) => sum + (entry.isDirectory ? 0 : entry.size), 0);\n\n    const summary = [\n      \"\",\n      `Total: ${totalFiles} files, ${totalDirs} directories`,\n      `Combined size: ${formatSize(totalSize)}`\n    ];\n\n    const text = [...formattedEntries, ...summary].join(\"\\n\");\n    const contentBlock = { type: \"text\" as const, text };\n    return {\n      content: [contentBlock],\n      structuredContent: { content: text }\n    };\n  }\n);\n\nserver.registerTool(\n  \"directory_tree\",\n  {\n    title: \"Directory Tree\",\n    description:\n      \"Get a recursive tree view of files and directories as a JSON structure. \" +\n      \"Each entry includes 'name', 'type' (file/directory), and 'children' for directories. \" +\n      \"Files have no children array, while directories always have a children array (which may be empty). \" +\n      \"The output is formatted with 2-space indentation for readability. 
Only works within allowed directories.\",\n    inputSchema: {\n      path: z.string(),\n      excludePatterns: z.array(z.string()).optional().default([])\n    },\n    outputSchema: { content: z.string() },\n    annotations: { readOnlyHint: true }\n  },\n  async (args: z.infer<typeof DirectoryTreeArgsSchema>) => {\n    interface TreeEntry {\n      name: string;\n      type: 'file' | 'directory';\n      children?: TreeEntry[];\n    }\n    const rootPath = args.path;\n\n    async function buildTree(currentPath: string, excludePatterns: string[] = []): Promise<TreeEntry[]> {\n      const validPath = await validatePath(currentPath);\n      const entries = await fs.readdir(validPath, { withFileTypes: true });\n      const result: TreeEntry[] = [];\n\n      for (const entry of entries) {\n        const relativePath = path.relative(rootPath, path.join(currentPath, entry.name));\n        const shouldExclude = excludePatterns.some(pattern => {\n          if (pattern.includes('*')) {\n            return minimatch(relativePath, pattern, { dot: true });\n          }\n          // For files: match exact name or as part of path\n          // For directories: match as directory path\n          return minimatch(relativePath, pattern, { dot: true }) ||\n            minimatch(relativePath, `**/${pattern}`, { dot: true }) ||\n            minimatch(relativePath, `**/${pattern}/**`, { dot: true });\n        });\n        if (shouldExclude)\n          continue;\n\n        const entryData: TreeEntry = {\n          name: entry.name,\n          type: entry.isDirectory() ? 
'directory' : 'file'\n        };\n\n        if (entry.isDirectory()) {\n          const subPath = path.join(currentPath, entry.name);\n          entryData.children = await buildTree(subPath, excludePatterns);\n        }\n\n        result.push(entryData);\n      }\n\n      return result;\n    }\n\n    const treeData = await buildTree(rootPath, args.excludePatterns);\n    const text = JSON.stringify(treeData, null, 2);\n    const contentBlock = { type: \"text\" as const, text };\n    return {\n      content: [contentBlock],\n      structuredContent: { content: text }\n    };\n  }\n);\n\nserver.registerTool(\n  \"move_file\",\n  {\n    title: \"Move File\",\n    description:\n      \"Move or rename files and directories. Can move files between directories \" +\n      \"and rename them in a single operation. If the destination exists, the \" +\n      \"operation will fail. Works across different directories and can be used \" +\n      \"for simple renaming within the same directory. Both source and destination must be within allowed directories.\",\n    inputSchema: {\n      source: z.string(),\n      destination: z.string()\n    },\n    outputSchema: { content: z.string() },\n    annotations: { readOnlyHint: false, idempotentHint: false, destructiveHint: true }\n  },\n  async (args: z.infer<typeof MoveFileArgsSchema>) => {\n    const validSourcePath = await validatePath(args.source);\n    const validDestPath = await validatePath(args.destination);\n    await fs.rename(validSourcePath, validDestPath);\n    const text = `Successfully moved ${args.source} to ${args.destination}`;\n    const contentBlock = { type: \"text\" as const, text };\n    return {\n      content: [contentBlock],\n      structuredContent: { content: text }\n    };\n  }\n);\n\nserver.registerTool(\n  \"search_files\",\n  {\n    title: \"Search Files\",\n    description:\n      \"Recursively search for files and directories matching a pattern. 
\" +\n      \"The patterns should be glob-style patterns that match paths relative to the working directory. \" +\n      \"Use pattern like '*.ext' to match files in current directory, and '**/*.ext' to match files in all subdirectories. \" +\n      \"Returns full paths to all matching items. Great for finding files when you don't know their exact location. \" +\n      \"Only searches within allowed directories.\",\n    inputSchema: {\n      path: z.string(),\n      pattern: z.string(),\n      excludePatterns: z.array(z.string()).optional().default([])\n    },\n    outputSchema: { content: z.string() },\n    annotations: { readOnlyHint: true }\n  },\n  async (args: z.infer<typeof SearchFilesArgsSchema>) => {\n    const validPath = await validatePath(args.path);\n    const results = await searchFilesWithValidation(validPath, args.pattern, allowedDirectories, { excludePatterns: args.excludePatterns });\n    const text = results.length > 0 ? results.join(\"\\n\") : \"No matches found\";\n    return {\n      content: [{ type: \"text\" as const, text }],\n      structuredContent: { content: text }\n    };\n  }\n);\n\nserver.registerTool(\n  \"get_file_info\",\n  {\n    title: \"Get File Info\",\n    description:\n      \"Retrieve detailed metadata about a file or directory. Returns comprehensive \" +\n      \"information including size, creation time, last modified time, permissions, \" +\n      \"and type. This tool is perfect for understanding file characteristics \" +\n      \"without reading the actual content. 
Only works within allowed directories.\",\n    inputSchema: {\n      path: z.string()\n    },\n    outputSchema: { content: z.string() },\n    annotations: { readOnlyHint: true }\n  },\n  async (args: z.infer<typeof GetFileInfoArgsSchema>) => {\n    const validPath = await validatePath(args.path);\n    const info = await getFileStats(validPath);\n    const text = Object.entries(info)\n      .map(([key, value]) => `${key}: ${value}`)\n      .join(\"\\n\");\n    return {\n      content: [{ type: \"text\" as const, text }],\n      structuredContent: { content: text }\n    };\n  }\n);\n\nserver.registerTool(\n  \"list_allowed_directories\",\n  {\n    title: \"List Allowed Directories\",\n    description:\n      \"Returns the list of directories that this server is allowed to access. \" +\n      \"Subdirectories within these allowed directories are also accessible. \" +\n      \"Use this to understand which directories and their nested paths are available \" +\n      \"before trying to access files.\",\n    inputSchema: {},\n    outputSchema: { content: z.string() },\n    annotations: { readOnlyHint: true }\n  },\n  async () => {\n    const text = `Allowed directories:\\n${allowedDirectories.join('\\n')}`;\n    return {\n      content: [{ type: \"text\" as const, text }],\n      structuredContent: { content: text }\n    };\n  }\n);\n\n// Updates allowed directories based on MCP client roots\nasync function updateAllowedDirectoriesFromRoots(requestedRoots: Root[]) {\n  const validatedRootDirs = await getValidRootDirectories(requestedRoots);\n  if (validatedRootDirs.length > 0) {\n    allowedDirectories = [...validatedRootDirs];\n    setAllowedDirectories(allowedDirectories); // Update the global state in lib.ts\n    console.error(`Updated allowed directories from MCP roots: ${validatedRootDirs.length} valid directories`);\n  } else {\n    console.error(\"No valid root directories provided by client\");\n  }\n}\n\n// Handles dynamic roots updates during runtime, when 
client sends \"roots/list_changed\" notification, server fetches the updated roots and replaces all allowed directories with the new roots.\nserver.server.setNotificationHandler(RootsListChangedNotificationSchema, async () => {\n  try {\n    // Request the updated roots list from the client\n    const response = await server.server.listRoots();\n    if (response && 'roots' in response) {\n      await updateAllowedDirectoriesFromRoots(response.roots);\n    }\n  } catch (error) {\n    console.error(\"Failed to request roots from client:\", error instanceof Error ? error.message : String(error));\n  }\n});\n\n// Handles post-initialization setup, specifically checking for and fetching MCP roots.\nserver.server.oninitialized = async () => {\n  const clientCapabilities = server.server.getClientCapabilities();\n\n  if (clientCapabilities?.roots) {\n    try {\n      const response = await server.server.listRoots();\n      if (response && 'roots' in response) {\n        await updateAllowedDirectoriesFromRoots(response.roots);\n      } else {\n        console.error(\"Client returned no roots set, keeping current settings\");\n      }\n    } catch (error) {\n      console.error(\"Failed to request initial roots from client:\", error instanceof Error ? error.message : String(error));\n    }\n  } else {\n    if (allowedDirectories.length > 0) {\n      console.error(\"Client does not support MCP Roots, using allowed directories set from server args:\", allowedDirectories);\n    }else{\n      throw new Error(`Server cannot operate: No allowed directories available. Server was started without command-line directories and client either does not support MCP roots protocol or provided empty roots. 
Please either: 1) Start server with directory arguments, or 2) Use a client that supports MCP roots protocol and provides valid root directories.`);\n    }\n  }\n};\n\n// Start server\nasync function runServer() {\n  const transport = new StdioServerTransport();\n  await server.connect(transport);\n  console.error(\"Secure MCP Filesystem Server running on stdio\");\n  if (allowedDirectories.length === 0) {\n    console.error(\"Started without allowed directories - waiting for client to provide roots via MCP protocol\");\n  }\n}\n\nrunServer().catch((error) => {\n  console.error(\"Fatal error running server:\", error);\n  process.exit(1);\n});\n"
  },
  {
    "path": "src/filesystem/lib.ts",
    "content": "import fs from \"fs/promises\";\nimport path from \"path\";\nimport os from 'os';\nimport { randomBytes } from 'crypto';\nimport { diffLines, createTwoFilesPatch } from 'diff';\nimport { minimatch } from 'minimatch';\nimport { normalizePath, expandHome } from './path-utils.js';\nimport { isPathWithinAllowedDirectories } from './path-validation.js';\n\n// Global allowed directories - set by the main module\nlet allowedDirectories: string[] = [];\n\n// Function to set allowed directories from the main module\nexport function setAllowedDirectories(directories: string[]): void {\n  allowedDirectories = [...directories];\n}\n\n// Function to get current allowed directories\nexport function getAllowedDirectories(): string[] {\n  return [...allowedDirectories];\n}\n\n// Type definitions\ninterface FileInfo {\n  size: number;\n  created: Date;\n  modified: Date;\n  accessed: Date;\n  isDirectory: boolean;\n  isFile: boolean;\n  permissions: string;\n}\n\nexport interface SearchOptions {\n  excludePatterns?: string[];\n}\n\nexport interface SearchResult {\n  path: string;\n  isDirectory: boolean;\n}\n\n// Pure Utility Functions\nexport function formatSize(bytes: number): string {\n  const units = ['B', 'KB', 'MB', 'GB', 'TB'];\n  if (bytes === 0) return '0 B';\n  \n  const i = Math.floor(Math.log(bytes) / Math.log(1024));\n  \n  if (i < 0 || i === 0) return `${bytes} ${units[0]}`;\n  \n  const unitIndex = Math.min(i, units.length - 1);\n  return `${(bytes / Math.pow(1024, unitIndex)).toFixed(2)} ${units[unitIndex]}`;\n}\n\nexport function normalizeLineEndings(text: string): string {\n  return text.replace(/\\r\\n/g, '\\n');\n}\n\nexport function createUnifiedDiff(originalContent: string, newContent: string, filepath: string = 'file'): string {\n  // Ensure consistent line endings for diff\n  const normalizedOriginal = normalizeLineEndings(originalContent);\n  const normalizedNew = normalizeLineEndings(newContent);\n\n  return createTwoFilesPatch(\n    
filepath,\n    filepath,\n    normalizedOriginal,\n    normalizedNew,\n    'original',\n    'modified'\n  );\n}\n\n// Helper function to resolve relative paths against allowed directories\nfunction resolveRelativePathAgainstAllowedDirectories(relativePath: string): string {\n  if (allowedDirectories.length === 0) {\n    // Fallback to process.cwd() if no allowed directories are set\n    return path.resolve(process.cwd(), relativePath);\n  }\n\n  // Try to resolve relative path against each allowed directory\n  for (const allowedDir of allowedDirectories) {\n    const candidate = path.resolve(allowedDir, relativePath);\n    const normalizedCandidate = normalizePath(candidate);\n    \n    // Check if the resulting path lies within any allowed directory\n    if (isPathWithinAllowedDirectories(normalizedCandidate, allowedDirectories)) {\n      return candidate;\n    }\n  }\n  \n  // If no valid resolution found, use the first allowed directory as base\n  // This provides a consistent fallback behavior\n  return path.resolve(allowedDirectories[0], relativePath);\n}\n\n// Security & Validation Functions\nexport async function validatePath(requestedPath: string): Promise<string> {\n  const expandedPath = expandHome(requestedPath);\n  const absolute = path.isAbsolute(expandedPath)\n    ? 
path.resolve(expandedPath)\n    : resolveRelativePathAgainstAllowedDirectories(expandedPath);\n\n  const normalizedRequested = normalizePath(absolute);\n\n  // Security: Check if path is within allowed directories before any file operations\n  const isAllowed = isPathWithinAllowedDirectories(normalizedRequested, allowedDirectories);\n  if (!isAllowed) {\n    throw new Error(`Access denied - path outside allowed directories: ${absolute} not in ${allowedDirectories.join(', ')}`);\n  }\n\n  // Security: Handle symlinks by checking their real path to prevent symlink attacks\n  // This prevents attackers from creating symlinks that point outside allowed directories\n  try {\n    const realPath = await fs.realpath(absolute);\n    const normalizedReal = normalizePath(realPath);\n    if (!isPathWithinAllowedDirectories(normalizedReal, allowedDirectories)) {\n      throw new Error(`Access denied - symlink target outside allowed directories: ${realPath} not in ${allowedDirectories.join(', ')}`);\n    }\n    return realPath;\n  } catch (error) {\n    // Security: For new files that don't exist yet, verify parent directory\n    // This ensures we can't create files in unauthorized locations\n    if ((error as NodeJS.ErrnoException).code === 'ENOENT') {\n      const parentDir = path.dirname(absolute);\n      try {\n        const realParentPath = await fs.realpath(parentDir);\n        const normalizedParent = normalizePath(realParentPath);\n        if (!isPathWithinAllowedDirectories(normalizedParent, allowedDirectories)) {\n          throw new Error(`Access denied - parent directory outside allowed directories: ${realParentPath} not in ${allowedDirectories.join(', ')}`);\n        }\n        return absolute;\n      } catch {\n        throw new Error(`Parent directory does not exist: ${parentDir}`);\n      }\n    }\n    throw error;\n  }\n}\n\n\n// File Operations\nexport async function getFileStats(filePath: string): Promise<FileInfo> {\n  const stats = await 
fs.stat(filePath);\n  return {\n    size: stats.size,\n    created: stats.birthtime,\n    modified: stats.mtime,\n    accessed: stats.atime,\n    isDirectory: stats.isDirectory(),\n    isFile: stats.isFile(),\n    permissions: stats.mode.toString(8).slice(-3),\n  };\n}\n\nexport async function readFileContent(filePath: string, encoding: string = 'utf-8'): Promise<string> {\n  return await fs.readFile(filePath, encoding as BufferEncoding);\n}\n\nexport async function writeFileContent(filePath: string, content: string): Promise<void> {\n  try {\n    // Security: 'wx' flag ensures exclusive creation - fails if file/symlink exists,\n    // preventing writes through pre-existing symlinks\n    await fs.writeFile(filePath, content, { encoding: \"utf-8\", flag: 'wx' });\n  } catch (error) {\n    if ((error as NodeJS.ErrnoException).code === 'EEXIST') {\n      // Security: Use atomic rename to prevent race conditions where symlinks\n      // could be created between validation and write. Rename operations\n      // replace the target file atomically and don't follow symlinks.\n      const tempPath = `${filePath}.${randomBytes(16).toString('hex')}.tmp`;\n      try {\n        await fs.writeFile(tempPath, content, 'utf-8');\n        await fs.rename(tempPath, filePath);\n      } catch (renameError) {\n        try {\n          await fs.unlink(tempPath);\n        } catch {}\n        throw renameError;\n      }\n    } else {\n      throw error;\n    }\n  }\n}\n\n\n// File Editing Functions\ninterface FileEdit {\n  oldText: string;\n  newText: string;\n}\n\nexport async function applyFileEdits(\n  filePath: string,\n  edits: FileEdit[],\n  dryRun: boolean = false\n): Promise<string> {\n  // Read file content and normalize line endings\n  const content = normalizeLineEndings(await fs.readFile(filePath, 'utf-8'));\n\n  // Apply edits sequentially\n  let modifiedContent = content;\n  for (const edit of edits) {\n    const normalizedOld = normalizeLineEndings(edit.oldText);\n    const 
normalizedNew = normalizeLineEndings(edit.newText);\n\n    // If exact match exists, use it\n    if (modifiedContent.includes(normalizedOld)) {\n      modifiedContent = modifiedContent.replace(normalizedOld, normalizedNew);\n      continue;\n    }\n\n    // Otherwise, try line-by-line matching with flexibility for whitespace\n    const oldLines = normalizedOld.split('\\n');\n    const contentLines = modifiedContent.split('\\n');\n    let matchFound = false;\n\n    for (let i = 0; i <= contentLines.length - oldLines.length; i++) {\n      const potentialMatch = contentLines.slice(i, i + oldLines.length);\n\n      // Compare lines with normalized whitespace\n      const isMatch = oldLines.every((oldLine, j) => {\n        const contentLine = potentialMatch[j];\n        return oldLine.trim() === contentLine.trim();\n      });\n\n      if (isMatch) {\n        // Preserve original indentation of first line\n        const originalIndent = contentLines[i].match(/^\\s*/)?.[0] || '';\n        const newLines = normalizedNew.split('\\n').map((line, j) => {\n          if (j === 0) return originalIndent + line.trimStart();\n          // For subsequent lines, try to preserve relative indentation\n          const oldIndent = oldLines[j]?.match(/^\\s*/)?.[0] || '';\n          const newIndent = line.match(/^\\s*/)?.[0] || '';\n          if (oldIndent && newIndent) {\n            const relativeIndent = newIndent.length - oldIndent.length;\n            return originalIndent + ' '.repeat(Math.max(0, relativeIndent)) + line.trimStart();\n          }\n          return line;\n        });\n\n        contentLines.splice(i, oldLines.length, ...newLines);\n        modifiedContent = contentLines.join('\\n');\n        matchFound = true;\n        break;\n      }\n    }\n\n    if (!matchFound) {\n      throw new Error(`Could not find exact match for edit:\\n${edit.oldText}`);\n    }\n  }\n\n  // Create unified diff\n  const diff = createUnifiedDiff(content, modifiedContent, filePath);\n\n  // 
Format diff with appropriate number of backticks\n  let numBackticks = 3;\n  while (diff.includes('`'.repeat(numBackticks))) {\n    numBackticks++;\n  }\n  const formattedDiff = `${'`'.repeat(numBackticks)}diff\\n${diff}${'`'.repeat(numBackticks)}\\n\\n`;\n\n  if (!dryRun) {\n    // Security: Use atomic rename to prevent race conditions where symlinks\n    // could be created between validation and write. Rename operations\n    // replace the target file atomically and don't follow symlinks.\n    const tempPath = `${filePath}.${randomBytes(16).toString('hex')}.tmp`;\n    try {\n      await fs.writeFile(tempPath, modifiedContent, 'utf-8');\n      await fs.rename(tempPath, filePath);\n    } catch (error) {\n      try {\n        await fs.unlink(tempPath);\n      } catch {}\n      throw error;\n    }\n  }\n\n  return formattedDiff;\n}\n\n// Memory-efficient implementation to get the last N lines of a file\nexport async function tailFile(filePath: string, numLines: number): Promise<string> {\n  const CHUNK_SIZE = 1024; // Read 1KB at a time\n  const stats = await fs.stat(filePath);\n  const fileSize = stats.size;\n  \n  if (fileSize === 0) return '';\n  \n  // Open file for reading\n  const fileHandle = await fs.open(filePath, 'r');\n  try {\n    const lines: string[] = [];\n    let position = fileSize;\n    let chunk = Buffer.alloc(CHUNK_SIZE);\n    let linesFound = 0;\n    let remainingText = '';\n    \n    // Read chunks from the end of the file until we have enough lines\n    while (position > 0 && linesFound < numLines) {\n      const size = Math.min(CHUNK_SIZE, position);\n      position -= size;\n      \n      const { bytesRead } = await fileHandle.read(chunk, 0, size, position);\n      if (!bytesRead) break;\n      \n      // Get the chunk as a string and prepend any remaining text from previous iteration\n      const readData = chunk.slice(0, bytesRead).toString('utf-8');\n      const chunkText = readData + remainingText;\n      \n      // Split by newlines and 
count\n      const chunkLines = normalizeLineEndings(chunkText).split('\\n');\n      \n      // If this isn't the end of the file, the first line is likely incomplete\n      // Save it to prepend to the next chunk\n      if (position > 0) {\n        remainingText = chunkLines[0];\n        chunkLines.shift(); // Remove the first (incomplete) line\n      }\n      \n      // Add lines to our result (up to the number we need)\n      for (let i = chunkLines.length - 1; i >= 0 && linesFound < numLines; i--) {\n        lines.unshift(chunkLines[i]);\n        linesFound++;\n      }\n    }\n    \n    return lines.join('\\n');\n  } finally {\n    await fileHandle.close();\n  }\n}\n\n// New function to get the first N lines of a file\nexport async function headFile(filePath: string, numLines: number): Promise<string> {\n  const fileHandle = await fs.open(filePath, 'r');\n  try {\n    const lines: string[] = [];\n    let buffer = '';\n    let bytesRead = 0;\n    const chunk = Buffer.alloc(1024); // 1KB buffer\n    \n    // Read chunks and count lines until we have enough or reach EOF\n    while (lines.length < numLines) {\n      const result = await fileHandle.read(chunk, 0, chunk.length, bytesRead);\n      if (result.bytesRead === 0) break; // End of file\n      bytesRead += result.bytesRead;\n      buffer += chunk.slice(0, result.bytesRead).toString('utf-8');\n      \n      const newLineIndex = buffer.lastIndexOf('\\n');\n      if (newLineIndex !== -1) {\n        const completeLines = buffer.slice(0, newLineIndex).split('\\n');\n        buffer = buffer.slice(newLineIndex + 1);\n        for (const line of completeLines) {\n          lines.push(line);\n          if (lines.length >= numLines) break;\n        }\n      }\n    }\n    \n    // If there is leftover content and we still need lines, add it\n    if (buffer.length > 0 && lines.length < numLines) {\n      lines.push(buffer);\n    }\n    \n    return lines.join('\\n');\n  } finally {\n    await fileHandle.close();\n  
}\n}\n\nexport async function searchFilesWithValidation(\n  rootPath: string,\n  pattern: string,\n  allowedDirectories: string[],\n  options: SearchOptions = {}\n): Promise<string[]> {\n  const { excludePatterns = [] } = options;\n  const results: string[] = [];\n\n  async function search(currentPath: string) {\n    const entries = await fs.readdir(currentPath, { withFileTypes: true });\n\n    for (const entry of entries) {\n      const fullPath = path.join(currentPath, entry.name);\n\n      try {\n        await validatePath(fullPath);\n\n        const relativePath = path.relative(rootPath, fullPath);\n        const shouldExclude = excludePatterns.some(excludePattern =>\n          minimatch(relativePath, excludePattern, { dot: true })\n        );\n\n        if (shouldExclude) continue;\n\n        // Use glob matching for the search pattern\n        if (minimatch(relativePath, pattern, { dot: true })) {\n          results.push(fullPath);\n        }\n\n        if (entry.isDirectory()) {\n          await search(fullPath);\n        }\n      } catch {\n        continue;\n      }\n    }\n  }\n\n  await search(rootPath);\n  return results;\n}\n"
  },
  {
    "path": "src/filesystem/package.json",
    "content": "{\n  \"name\": \"@modelcontextprotocol/server-filesystem\",\n  \"version\": \"0.6.3\",\n  \"description\": \"MCP server for filesystem access\",\n  \"license\": \"SEE LICENSE IN LICENSE\",\n  \"mcpName\": \"io.github.modelcontextprotocol/server-filesystem\",\n  \"author\": \"Model Context Protocol a Series of LF Projects, LLC.\",\n  \"homepage\": \"https://modelcontextprotocol.io\",\n  \"bugs\": \"https://github.com/modelcontextprotocol/servers/issues\",\n  \"repository\": {\n    \"type\": \"git\",\n    \"url\": \"https://github.com/modelcontextprotocol/servers.git\"\n  },\n  \"type\": \"module\",\n  \"bin\": {\n    \"mcp-server-filesystem\": \"dist/index.js\"\n  },\n  \"files\": [\n    \"dist\"\n  ],\n  \"scripts\": {\n    \"build\": \"tsc && shx chmod +x dist/*.js\",\n    \"prepare\": \"npm run build\",\n    \"watch\": \"tsc --watch\",\n    \"test\": \"vitest run --coverage\"\n  },\n  \"dependencies\": {\n    \"@modelcontextprotocol/sdk\": \"^1.26.0\",\n    \"diff\": \"^8.0.3\",\n    \"glob\": \"^10.5.0\",\n    \"minimatch\": \"^10.0.1\",\n    \"zod-to-json-schema\": \"^3.23.5\"\n  },\n  \"devDependencies\": {\n    \"@types/diff\": \"^5.0.9\",\n    \"@types/minimatch\": \"^5.1.2\",\n    \"@types/node\": \"^22\",\n    \"@vitest/coverage-v8\": \"^2.1.8\",\n    \"shx\": \"^0.3.4\",\n    \"typescript\": \"^5.8.2\",\n    \"vitest\": \"^2.1.8\"\n  }\n}\n"
  },
  {
    "path": "src/filesystem/path-utils.ts",
    "content": "import path from \"path\";\nimport os from 'os';\n\n/**\n * Converts WSL or Unix-style Windows paths to Windows format\n * @param p The path to convert\n * @returns Converted Windows path\n */\nexport function convertToWindowsPath(p: string): string {\n  // Handle WSL paths (/mnt/c/...)\n  // NEVER convert WSL paths - they are valid Linux paths that work with Node.js fs operations in WSL\n  // Converting them to Windows format (C:\\...) breaks fs operations inside WSL\n  if (p.startsWith('/mnt/')) {\n    return p; // Leave WSL paths unchanged\n  }\n\n  // Handle Unix-style Windows paths (/c/...)\n  // Only convert when running on Windows\n  if (p.match(/^\\/[a-zA-Z]\\//) && process.platform === 'win32') {\n    const driveLetter = p.charAt(1).toUpperCase();\n    const pathPart = p.slice(2).replace(/\\//g, '\\\\');\n    return `${driveLetter}:${pathPart}`;\n  }\n\n  // Handle standard Windows paths, ensuring backslashes\n  if (p.match(/^[a-zA-Z]:/)) {\n    return p.replace(/\\//g, '\\\\');\n  }\n\n  // Leave non-Windows paths unchanged\n  return p;\n}\n\n/**\n * Normalizes path by standardizing format while preserving OS-specific behavior\n * @param p The path to normalize\n * @returns Normalized path\n */\nexport function normalizePath(p: string): string {\n  // Remove any surrounding quotes and whitespace\n  p = p.trim().replace(/^[\"']|[\"']$/g, '');\n\n  // Check if this is a Unix path that should not be converted\n  // WSL paths (/mnt/) should ALWAYS be preserved as they work correctly in WSL with Node.js fs\n  // Regular Unix paths should also be preserved\n  const isUnixPath = p.startsWith('/') && (\n    // Always preserve WSL paths (/mnt/c/, /mnt/d/, etc.)\n    p.match(/^\\/mnt\\/[a-z]\\//i) ||\n    // On non-Windows platforms, treat all absolute paths as Unix paths\n    (process.platform !== 'win32') ||\n    // On Windows, preserve Unix paths that aren't Unix-style Windows paths (/c/, /d/, etc.)\n    (process.platform === 'win32' && 
!p.match(/^\\/[a-zA-Z]\\//))\n  );\n\n  if (isUnixPath) {\n    // For Unix paths, just normalize without converting to Windows format\n    // Replace double slashes with single slashes and remove trailing slashes\n    return p.replace(/\\/+/g, '/').replace(/(?<!^)\\/$/, '');\n  }\n\n  // Convert Unix-style Windows paths (/c/, /d/) to Windows format if on Windows\n  // This function will now leave /mnt/ paths unchanged\n  p = convertToWindowsPath(p);\n\n  // Handle double backslashes, preserving leading UNC \\\\\n  if (p.startsWith('\\\\\\\\')) {\n    // For UNC paths, first normalize any excessive leading backslashes to exactly \\\\\n    // Then normalize double backslashes in the rest of the path\n    let uncPath = p;\n    // Replace multiple leading backslashes with exactly two\n    uncPath = uncPath.replace(/^\\\\{2,}/, '\\\\\\\\');\n    // Now normalize any remaining double backslashes in the rest of the path\n    const restOfPath = uncPath.substring(2).replace(/\\\\\\\\/g, '\\\\');\n    p = '\\\\\\\\' + restOfPath;\n  } else {\n    // For non-UNC paths, normalize all double backslashes\n    p = p.replace(/\\\\\\\\/g, '\\\\');\n  }\n\n  // On Windows, if we have a bare drive letter (e.g. \"C:\"), append a separator\n  // so path.normalize doesn't return \"C:.\" which can break path validation.\n  if (process.platform === 'win32' && /^[a-zA-Z]:$/.test(p)) {\n    p = p + path.sep;\n  }\n\n  // Use Node's path normalization, which handles . and .. 
segments\n  let normalized = path.normalize(p);\n\n  // Fix UNC paths after normalization (path.normalize can remove a leading backslash)\n  if (p.startsWith('\\\\\\\\') && !normalized.startsWith('\\\\\\\\')) {\n    normalized = '\\\\' + normalized;\n  }\n\n  // Handle Windows paths: convert slashes and ensure drive letter is capitalized\n  if (normalized.match(/^[a-zA-Z]:/)) {\n    let result = normalized.replace(/\\//g, '\\\\');\n    // Capitalize drive letter if present\n    if (/^[a-z]:/.test(result)) {\n      result = result.charAt(0).toUpperCase() + result.slice(1);\n    }\n    return result;\n  }\n\n  // On Windows, convert forward slashes to backslashes for relative paths\n  // On Linux/Unix, preserve forward slashes\n  if (process.platform === 'win32') {\n    return normalized.replace(/\\//g, '\\\\');\n  }\n\n  // On non-Windows platforms, keep the normalized path as-is\n  return normalized;\n}\n\n/**\n * Expands home directory tildes in paths\n * @param filepath The path to expand\n * @returns Expanded path\n */\nexport function expandHome(filepath: string): string {\n  if (filepath.startsWith('~/') || filepath === '~') {\n    return path.join(os.homedir(), filepath.slice(1));\n  }\n  return filepath;\n}\n\n"
  },
  {
    "path": "src/filesystem/path-validation.ts",
    "content": "import path from 'path';\n\n/**\n * Checks if an absolute path is within any of the allowed directories.\n * \n * @param absolutePath - The absolute path to check (will be normalized)\n * @param allowedDirectories - Array of absolute allowed directory paths (will be normalized)\n * @returns true if the path is within an allowed directory, false otherwise\n * @throws Error if given relative paths after normalization\n */\nexport function isPathWithinAllowedDirectories(absolutePath: string, allowedDirectories: string[]): boolean {\n  // Type validation\n  if (typeof absolutePath !== 'string' || !Array.isArray(allowedDirectories)) {\n    return false;\n  }\n\n  // Reject empty inputs\n  if (!absolutePath || allowedDirectories.length === 0) {\n    return false;\n  }\n\n  // Reject null bytes (forbidden in paths)\n  if (absolutePath.includes('\\x00')) {\n    return false;\n  }\n\n  // Normalize the input path\n  let normalizedPath: string;\n  try {\n    normalizedPath = path.resolve(path.normalize(absolutePath));\n  } catch {\n    return false;\n  }\n\n  // Verify it's absolute after normalization\n  if (!path.isAbsolute(normalizedPath)) {\n    throw new Error('Path must be absolute after normalization');\n  }\n\n  // Check against each allowed directory\n  return allowedDirectories.some(dir => {\n    if (typeof dir !== 'string' || !dir) {\n      return false;\n    }\n\n    // Reject null bytes in allowed dirs\n    if (dir.includes('\\x00')) {\n      return false;\n    }\n\n    // Normalize the allowed directory\n    let normalizedDir: string;\n    try {\n      normalizedDir = path.resolve(path.normalize(dir));\n    } catch {\n      return false;\n    }\n\n    // Verify allowed directory is absolute after normalization\n    if (!path.isAbsolute(normalizedDir)) {\n      throw new Error('Allowed directories must be absolute paths after normalization');\n    }\n\n    // Check if normalizedPath is within normalizedDir\n    // Path is inside if it's the same 
or a subdirectory\n    if (normalizedPath === normalizedDir) {\n      return true;\n    }\n    \n    // Special case for root directory to avoid double slash\n    // On Windows, we need to check if both paths are on the same drive\n    if (normalizedDir === path.sep) {\n      return normalizedPath.startsWith(path.sep);\n    }\n    \n    // On Windows, also check for drive root (e.g., \"C:\\\")\n    if (path.sep === '\\\\' && normalizedDir.match(/^[A-Za-z]:\\\\?$/)) {\n      // Ensure both paths are on the same drive\n      const dirDrive = normalizedDir.charAt(0).toLowerCase();\n      const pathDrive = normalizedPath.charAt(0).toLowerCase();\n      return pathDrive === dirDrive && normalizedPath.startsWith(normalizedDir.replace(/\\\\?$/, '\\\\'));\n    }\n    \n    return normalizedPath.startsWith(normalizedDir + path.sep);\n  });\n}\n"
  },
  {
    "path": "src/filesystem/roots-utils.ts",
    "content": "import { promises as fs, type Stats } from 'fs';\nimport path from 'path';\nimport os from 'os';\nimport { normalizePath } from './path-utils.js';\nimport type { Root } from '@modelcontextprotocol/sdk/types.js';\nimport { fileURLToPath } from \"url\";\n\n/**\n * Converts a root URI to a normalized directory path with basic security validation.\n * @param rootUri - File URI (file://...) or plain directory path\n * @returns Promise resolving to validated path or null if invalid\n */\nasync function parseRootUri(rootUri: string): Promise<string | null> {\n  try {\n    const rawPath = rootUri.startsWith('file://') ? fileURLToPath(rootUri) : rootUri;\n    const expandedPath = rawPath.startsWith('~/') || rawPath === '~' \n      ? path.join(os.homedir(), rawPath.slice(1)) \n      : rawPath;\n    const absolutePath = path.resolve(expandedPath);\n    const resolvedPath = await fs.realpath(absolutePath);\n    return normalizePath(resolvedPath);\n  } catch {\n    return null; // Path doesn't exist or other error\n  }\n}\n\n/**\n * Formats error message for directory validation failures.\n * @param dir - Directory path that failed validation\n * @param error - Error that occurred during validation\n * @param reason - Specific reason for failure\n * @returns Formatted error message\n */\nfunction formatDirectoryError(dir: string, error?: unknown, reason?: string): string {\n  if (reason) {\n    return `Skipping ${reason}: ${dir}`;\n  }\n  const message = error instanceof Error ? 
error.message : String(error);\n  return `Skipping invalid directory: ${dir} due to error: ${message}`;\n}\n\n/**\n * Resolves requested root directories from MCP root specifications.\n * \n * Converts root URI specifications (file:// URIs or plain paths) into normalized\n * directory paths, validating that each path exists and is a directory.\n * Includes symlink resolution for security.\n * \n * @param requestedRoots - Array of root specifications with URI and optional name\n * @returns Promise resolving to array of validated directory paths\n */\nexport async function getValidRootDirectories(\n  requestedRoots: readonly Root[]\n): Promise<string[]> {\n  const validatedDirectories: string[] = [];\n  \n  for (const requestedRoot of requestedRoots) {\n    const resolvedPath = await parseRootUri(requestedRoot.uri);\n    if (!resolvedPath) {\n      console.error(formatDirectoryError(requestedRoot.uri, undefined, 'invalid path or inaccessible'));\n      continue;\n    }\n    \n    try {\n      const stats: Stats = await fs.stat(resolvedPath);\n      if (stats.isDirectory()) {\n        validatedDirectories.push(resolvedPath);\n      } else {\n        console.error(formatDirectoryError(resolvedPath, undefined, 'non-directory root'));\n      }\n    } catch (error) {\n      console.error(formatDirectoryError(resolvedPath, error));\n    }\n  }\n  \n  return validatedDirectories;\n}"
  },
  {
    "path": "src/filesystem/tsconfig.json",
    "content": "{\n  \"extends\": \"../../tsconfig.json\",\n  \"compilerOptions\": {\n    \"outDir\": \"./dist\",\n    \"rootDir\": \".\",\n    \"moduleResolution\": \"NodeNext\",\n    \"module\": \"NodeNext\"\n  },\n  \"include\": [\n    \"./**/*.ts\"\n  ],\n  \"exclude\": [\n    \"**/__tests__/**\",\n    \"**/*.test.ts\",\n    \"**/*.spec.ts\",\n    \"vitest.config.ts\"\n  ]\n}\n"
  },
  {
    "path": "src/filesystem/vitest.config.ts",
    "content": "import { defineConfig } from 'vitest/config';\n\nexport default defineConfig({\n  test: {\n    globals: true,\n    environment: 'node',\n    include: ['**/__tests__/**/*.test.ts'],\n    coverage: {\n      provider: 'v8',\n      include: ['**/*.ts'],\n      exclude: ['**/__tests__/**', '**/dist/**'],\n    },\n  },\n});\n"
  },
  {
    "path": "src/git/.gitignore",
    "content": "__pycache__\n.venv\n"
  },
  {
    "path": "src/git/.python-version",
    "content": "3.10\n"
  },
  {
    "path": "src/git/Dockerfile",
    "content": "# Use a Python image with uv pre-installed\nFROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim AS uv\n\n# Install the project into `/app`\nWORKDIR /app\n\n# Enable bytecode compilation\nENV UV_COMPILE_BYTECODE=1\n\n# Copy from the cache instead of linking since it's a mounted volume\nENV UV_LINK_MODE=copy\n\n# Install the project's dependencies using the lockfile and settings\nRUN --mount=type=cache,target=/root/.cache/uv \\\n    --mount=type=bind,source=uv.lock,target=uv.lock \\\n    --mount=type=bind,source=pyproject.toml,target=pyproject.toml \\\n    uv sync --locked --no-install-project --no-dev --no-editable\n\n# Then, add the rest of the project source code and install it\n# Installing separately from its dependencies allows optimal layer caching\nADD . /app\nRUN --mount=type=cache,target=/root/.cache/uv \\\n    uv sync --locked --no-dev --no-editable\n\nFROM python:3.12-slim-bookworm\n\nRUN apt-get update && apt-get install -y git git-lfs && rm -rf /var/lib/apt/lists/* \\\n    && git lfs install --system\n\nWORKDIR /app\n \nCOPY --from=uv /root/.local /root/.local\nCOPY --from=uv --chown=app:app /app/.venv /app/.venv\n\n# Place executables in the environment at the front of the path\nENV PATH=\"/app/.venv/bin:$PATH\"\n\n# when running the container, add --db-path and a bind mount to the host's db file\nENTRYPOINT [\"mcp-server-git\"]\n"
  },
  {
    "path": "src/git/LICENSE",
    "content": "Copyright (c) 2024 Anthropic, PBC.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
  },
  {
    "path": "src/git/README.md",
    "content": "# mcp-server-git: A git MCP server\n\n<!-- mcp-name: io.github.modelcontextprotocol/server-git -->\n\n## Overview\n\nA Model Context Protocol server for Git repository interaction and automation. This server provides tools to read, search, and manipulate Git repositories via Large Language Models.\n\nPlease note that mcp-server-git is currently in early development. The functionality and available tools are subject to change and expansion as we continue to develop and improve the server.\n\n### Tools\n\n1. `git_status`\n   - Shows the working tree status\n   - Input:\n     - `repo_path` (string): Path to Git repository\n   - Returns: Current status of working directory as text output\n\n2. `git_diff_unstaged`\n   - Shows changes in working directory not yet staged\n   - Inputs:\n     - `repo_path` (string): Path to Git repository\n     - `context_lines` (number, optional): Number of context lines to show (default: 3)\n   - Returns: Diff output of unstaged changes\n\n3. `git_diff_staged`\n   - Shows changes that are staged for commit\n   - Inputs:\n     - `repo_path` (string): Path to Git repository\n     - `context_lines` (number, optional): Number of context lines to show (default: 3)\n   - Returns: Diff output of staged changes\n\n4. `git_diff`\n   - Shows differences between branches or commits\n   - Inputs:\n     - `repo_path` (string): Path to Git repository\n     - `target` (string): Target branch or commit to compare with\n     - `context_lines` (number, optional): Number of context lines to show (default: 3)\n   - Returns: Diff output comparing current state with target\n\n5. `git_commit`\n   - Records changes to the repository\n   - Inputs:\n     - `repo_path` (string): Path to Git repository\n     - `message` (string): Commit message\n   - Returns: Confirmation with new commit hash\n\n6. 
`git_add`\n   - Adds file contents to the staging area\n   - Inputs:\n     - `repo_path` (string): Path to Git repository\n     - `files` (string[]): Array of file paths to stage\n   - Returns: Confirmation of staged files\n\n7. `git_reset`\n   - Unstages all staged changes\n   - Input:\n     - `repo_path` (string): Path to Git repository\n   - Returns: Confirmation of reset operation\n\n8. `git_log`\n   - Shows the commit logs with optional date filtering\n   - Inputs:\n     - `repo_path` (string): Path to Git repository\n     - `max_count` (number, optional): Maximum number of commits to show (default: 10)\n     - `start_timestamp` (string, optional): Start timestamp for filtering commits. Accepts ISO 8601 format (e.g., '2024-01-15T14:30:25'), relative dates (e.g., '2 weeks ago', 'yesterday'), or absolute dates (e.g., '2024-01-15', 'Jan 15 2024')\n     - `end_timestamp` (string, optional): End timestamp for filtering commits. Accepts ISO 8601 format (e.g., '2024-01-15T14:30:25'), relative dates (e.g., '2 weeks ago', 'yesterday'), or absolute dates (e.g., '2024-01-15', 'Jan 15 2024')\n   - Returns: Array of commit entries with hash, author, date, and message\n\n9. `git_create_branch`\n   - Creates a new branch\n   - Inputs:\n     - `repo_path` (string): Path to Git repository\n     - `branch_name` (string): Name of the new branch\n     - `base_branch` (string, optional): Base branch to create from (defaults to current branch)\n   - Returns: Confirmation of branch creation\n10. `git_checkout`\n   - Switches branches\n   - Inputs:\n     - `repo_path` (string): Path to Git repository\n     - `branch_name` (string): Name of branch to checkout\n   - Returns: Confirmation of branch switch\n11. `git_show`\n   - Shows the contents of a commit\n   - Inputs:\n     - `repo_path` (string): Path to Git repository\n     - `revision` (string): The revision (commit hash, branch name, tag) to show\n   - Returns: Contents of the specified commit\n\n12. 
`git_branch`\n   - List Git branches\n   - Inputs:\n     - `repo_path` (string): Path to the Git repository.\n     - `branch_type` (string): Whether to list local branches ('local'), remote branches ('remote') or all branches('all').\n     - `contains` (string, optional): The commit sha that branch should contain. Do not pass anything to this param if no commit sha is specified\n     - `not_contains` (string, optional): The commit sha that branch should NOT contain. Do not pass anything to this param if no commit sha is specified\n   - Returns: List of branches\n\n## Installation\n\n### Using uv (recommended)\n\nWhen using [`uv`](https://docs.astral.sh/uv/) no specific installation is needed. We will\nuse [`uvx`](https://docs.astral.sh/uv/guides/tools/) to directly run *mcp-server-git*.\n\n### Using PIP\n\nAlternatively you can install `mcp-server-git` via pip:\n\n```\npip install mcp-server-git\n```\n\nAfter installation, you can run it as a script using:\n\n```\npython -m mcp_server_git\n```\n\n## Configuration\n\n### Usage with Claude Desktop\n\nAdd this to your `claude_desktop_config.json`:\n\n<details>\n<summary>Using uvx</summary>\n\n```json\n\"mcpServers\": {\n  \"git\": {\n    \"command\": \"uvx\",\n    \"args\": [\"mcp-server-git\", \"--repository\", \"path/to/git/repo\"]\n  }\n}\n```\n</details>\n\n<details>\n<summary>Using docker</summary>\n\n* Note: replace '/Users/username' with the a path that you want to be accessible by this tool\n\n```json\n\"mcpServers\": {\n  \"git\": {\n    \"command\": \"docker\",\n    \"args\": [\"run\", \"--rm\", \"-i\", \"--mount\", \"type=bind,src=/Users/username,dst=/Users/username\", \"mcp/git\"]\n  }\n}\n```\n</details>\n\n<details>\n<summary>Using pip installation</summary>\n\n```json\n\"mcpServers\": {\n  \"git\": {\n    \"command\": \"python\",\n    \"args\": [\"-m\", \"mcp_server_git\", \"--repository\", \"path/to/git/repo\"]\n  }\n}\n```\n</details>\n\n### Usage with VS Code\n\nFor quick installation, use one of the 
one-click install buttons below...\n\n[![Install with UV in VS Code](https://img.shields.io/badge/VS_Code-UV-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=git&config=%7B%22command%22%3A%22uvx%22%2C%22args%22%3A%5B%22mcp-server-git%22%5D%7D) [![Install with UV in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-UV-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=git&config=%7B%22command%22%3A%22uvx%22%2C%22args%22%3A%5B%22mcp-server-git%22%5D%7D&quality=insiders)\n\n[![Install with Docker in VS Code](https://img.shields.io/badge/VS_Code-Docker-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=git&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22--rm%22%2C%22-i%22%2C%22--mount%22%2C%22type%3Dbind%2Csrc%3D%24%7BworkspaceFolder%7D%2Cdst%3D%2Fworkspace%22%2C%22mcp%2Fgit%22%5D%7D) [![Install with Docker in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Docker-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=git&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22--rm%22%2C%22-i%22%2C%22--mount%22%2C%22type%3Dbind%2Csrc%3D%24%7BworkspaceFolder%7D%2Cdst%3D%2Fworkspace%22%2C%22mcp%2Fgit%22%5D%7D&quality=insiders)\n\nFor manual installation, you can configure the MCP server using one of these methods:\n\n**Method 1: User Configuration (Recommended)**\nAdd the configuration to your user-level MCP configuration file. Open the Command Palette (`Ctrl + Shift + P`) and run `MCP: Open User Configuration`. This will open your user `mcp.json` file where you can add the server configuration.\n\n**Method 2: Workspace Configuration**\nAlternatively, you can add the configuration to a file called `.vscode/mcp.json` in your workspace. 
This will allow you to share the configuration with others.\n\n> For more details about MCP configuration in VS Code, see the [official VS Code MCP documentation](https://code.visualstudio.com/docs/copilot/customization/mcp-servers).\n\n```json\n{\n  \"servers\": {\n    \"git\": {\n      \"command\": \"uvx\",\n      \"args\": [\"mcp-server-git\"]\n    }\n  }\n}\n```\n\nFor Docker installation:\n\n```json\n{\n  \"mcp\": {\n    \"servers\": {\n      \"git\": {\n        \"command\": \"docker\",\n        \"args\": [\n          \"run\",\n          \"--rm\",\n          \"-i\",\n          \"--mount\", \"type=bind,src=${workspaceFolder},dst=/workspace\",\n          \"mcp/git\"\n        ]\n      }\n    }\n  }\n}\n```\n\n### Usage with [Zed](https://github.com/zed-industries/zed)\n\nAdd to your Zed settings.json:\n\n<details>\n<summary>Using uvx</summary>\n\n```json\n\"context_servers\": [\n  \"mcp-server-git\": {\n    \"command\": {\n      \"path\": \"uvx\",\n      \"args\": [\"mcp-server-git\"]\n    }\n  }\n],\n```\n</details>\n\n<details>\n<summary>Using pip installation</summary>\n\n```json\n\"context_servers\": {\n  \"mcp-server-git\": {\n    \"command\": {\n      \"path\": \"python\",\n      \"args\": [\"-m\", \"mcp_server_git\"]\n    }\n  }\n},\n```\n</details>\n\n### Usage with [Zencoder](https://zencoder.ai)\n\n1. Go to the Zencoder menu (...)\n2. From the dropdown menu, select `Agent Tools`\n3. Click on the `Add Custom MCP`\n4. Add the name (i.e. git) and server configuration from below, and make sure to hit the `Install` button\n\n<details>\n<summary>Using uvx</summary>\n\n```json\n{\n    \"command\": \"uvx\",\n    \"args\": [\"mcp-server-git\", \"--repository\", \"path/to/git/repo\"]\n}\n```\n</details>\n\n## Debugging\n\nYou can use the MCP inspector to debug the server. 
For uvx installations:\n\n```\nnpx @modelcontextprotocol/inspector uvx mcp-server-git\n```\n\nOr if you've installed the package in a specific directory or are developing on it:\n\n```\ncd path/to/servers/src/git\nnpx @modelcontextprotocol/inspector uv run mcp-server-git\n```\n\nRunning `tail -n 20 -f ~/Library/Logs/Claude/mcp*.log` will show the logs from the server and may\nhelp you debug any issues.\n\n## Development\n\nIf you are doing local development, there are two ways to test your changes:\n\n1. Run the MCP inspector to test your changes. See [Debugging](#debugging) for run instructions.\n\n2. Test using the Claude desktop app. Add the following to your `claude_desktop_config.json`:\n\n### Docker\n\n```json\n{\n  \"mcpServers\": {\n    \"git\": {\n      \"command\": \"docker\",\n      \"args\": [\n        \"run\",\n        \"--rm\",\n        \"-i\",\n        \"--mount\", \"type=bind,src=/Users/username/Desktop,dst=/projects/Desktop\",\n        \"--mount\", \"type=bind,src=/path/to/other/allowed/dir,dst=/projects/other/allowed/dir,ro\",\n        \"--mount\", \"type=bind,src=/path/to/file.txt,dst=/projects/path/to/file.txt\",\n        \"mcp/git\"\n      ]\n    }\n  }\n}\n```\n\n### UVX\n```json\n{\n\"mcpServers\": {\n  \"git\": {\n    \"command\": \"uv\",\n    \"args\": [\n      \"--directory\",\n      \"/<path to mcp-servers>/mcp-servers/src/git\",\n      \"run\",\n      \"mcp-server-git\"\n    ]\n    }\n  }\n}\n```\n\n## Build\n\nDocker build:\n\n```bash\ncd src/git\ndocker build -t mcp/git .\n```\n\n## License\n\nThis MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository.\n"
  },
  {
    "path": "src/git/pyproject.toml",
    "content": "[project]\nname = \"mcp-server-git\"\nversion = \"0.6.2\"\ndescription = \"A Model Context Protocol server providing tools to read, search, and manipulate Git repositories programmatically via LLMs\"\nreadme = \"README.md\"\nrequires-python = \">=3.10\"\nauthors = [{ name = \"Anthropic, PBC.\" }]\nmaintainers = [{ name = \"David Soria Parra\", email = \"davidsp@anthropic.com\" }]\nkeywords = [\"git\", \"mcp\", \"llm\", \"automation\"]\nlicense = { text = \"MIT\" }\nclassifiers = [\n    \"Development Status :: 4 - Beta\",\n    \"Intended Audience :: Developers\",\n    \"License :: OSI Approved :: MIT License\",\n    \"Programming Language :: Python :: 3\",\n    \"Programming Language :: Python :: 3.10\",\n]\ndependencies = [\n    \"click>=8.1.7\",\n    \"gitpython>=3.1.45\",\n    \"mcp>=1.0.0\",\n    \"pydantic>=2.0.0\",\n]\n\n[project.scripts]\nmcp-server-git = \"mcp_server_git:main\"\n\n[build-system]\nrequires = [\"hatchling\"]\nbuild-backend = \"hatchling.build\"\n\n[dependency-groups]\ndev = [\"pyright>=1.1.407\", \"ruff>=0.7.3\", \"pytest>=8.0.0\"]\n\n[tool.pytest.ini_options]\ntestpaths = [\"tests\"]\npython_files = \"test_*.py\"\npython_classes = \"Test*\"\npython_functions = \"test_*\"\n"
  },
  {
    "path": "src/git/src/mcp_server_git/__init__.py",
    "content": "import click\nfrom pathlib import Path\nimport logging\nimport sys\nfrom .server import serve\n\n@click.command()\n@click.option(\"--repository\", \"-r\", type=Path, help=\"Git repository path\")\n@click.option(\"-v\", \"--verbose\", count=True)\ndef main(repository: Path | None, verbose: bool) -> None:\n    \"\"\"MCP Git Server - Git functionality for MCP\"\"\"\n    import asyncio\n\n    logging_level = logging.WARN\n    if verbose == 1:\n        logging_level = logging.INFO\n    elif verbose >= 2:\n        logging_level = logging.DEBUG\n\n    logging.basicConfig(level=logging_level, stream=sys.stderr)\n    asyncio.run(serve(repository))\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "src/git/src/mcp_server_git/__main__.py",
    "content": "# __main__.py\n\nfrom mcp_server_git import main\n\nmain()\n"
  },
  {
    "path": "src/git/src/mcp_server_git/py.typed",
    "content": ""
  },
  {
    "path": "src/git/src/mcp_server_git/server.py",
    "content": "import logging\nfrom pathlib import Path\nfrom typing import Sequence, Optional\nfrom mcp.server import Server\nfrom mcp.server.session import ServerSession\nfrom mcp.server.stdio import stdio_server\nfrom mcp.types import (\n    ClientCapabilities,\n    TextContent,\n    Tool,\n    ListRootsResult,\n    RootsCapability,\n    ToolAnnotations,\n)\nfrom enum import Enum\nimport git\nfrom git.exc import BadName\nfrom pydantic import BaseModel, Field\n\n# Default number of context lines to show in diff output\nDEFAULT_CONTEXT_LINES = 3\n\nclass GitStatus(BaseModel):\n    repo_path: str\n\nclass GitDiffUnstaged(BaseModel):\n    repo_path: str\n    context_lines: int = DEFAULT_CONTEXT_LINES\n\nclass GitDiffStaged(BaseModel):\n    repo_path: str\n    context_lines: int = DEFAULT_CONTEXT_LINES\n\nclass GitDiff(BaseModel):\n    repo_path: str\n    target: str\n    context_lines: int = DEFAULT_CONTEXT_LINES\n\nclass GitCommit(BaseModel):\n    repo_path: str\n    message: str\n\nclass GitAdd(BaseModel):\n    repo_path: str\n    files: list[str]\n\nclass GitReset(BaseModel):\n    repo_path: str\n\nclass GitLog(BaseModel):\n    repo_path: str\n    max_count: int = 10\n    start_timestamp: Optional[str] = Field(\n        None,\n        description=\"Start timestamp for filtering commits. Accepts: ISO 8601 format (e.g., '2024-01-15T14:30:25'), relative dates (e.g., '2 weeks ago', 'yesterday'), or absolute dates (e.g., '2024-01-15', 'Jan 15 2024')\"\n    )\n    end_timestamp: Optional[str] = Field(\n        None,\n        description=\"End timestamp for filtering commits. 
Accepts: ISO 8601 format (e.g., '2024-01-15T14:30:25'), relative dates (e.g., '2 weeks ago', 'yesterday'), or absolute dates (e.g., '2024-01-15', 'Jan 15 2024')\"\n    )\n\nclass GitCreateBranch(BaseModel):\n    repo_path: str\n    branch_name: str\n    base_branch: str | None = None\n\nclass GitCheckout(BaseModel):\n    repo_path: str\n    branch_name: str\n\nclass GitShow(BaseModel):\n    repo_path: str\n    revision: str\n\n\n\nclass GitBranch(BaseModel):\n    repo_path: str = Field(\n        ...,\n        description=\"The path to the Git repository.\",\n    )\n    branch_type: str = Field(\n        ...,\n        description=\"Whether to list local branches ('local'), remote branches ('remote') or all branches('all').\",\n    )\n    contains: Optional[str] = Field(\n        None,\n        description=\"The commit sha that branch should contain. Do not pass anything to this param if no commit sha is specified\",\n    )\n    not_contains: Optional[str] = Field(\n        None,\n        description=\"The commit sha that branch should NOT contain. 
Do not pass anything to this param if no commit sha is specified\",\n    )\n\n\nclass GitTools(str, Enum):\n    STATUS = \"git_status\"\n    DIFF_UNSTAGED = \"git_diff_unstaged\"\n    DIFF_STAGED = \"git_diff_staged\"\n    DIFF = \"git_diff\"\n    COMMIT = \"git_commit\"\n    ADD = \"git_add\"\n    RESET = \"git_reset\"\n    LOG = \"git_log\"\n    CREATE_BRANCH = \"git_create_branch\"\n    CHECKOUT = \"git_checkout\"\n    SHOW = \"git_show\"\n\n    BRANCH = \"git_branch\"\n\ndef git_status(repo: git.Repo) -> str:\n    return repo.git.status()\n\ndef git_diff_unstaged(repo: git.Repo, context_lines: int = DEFAULT_CONTEXT_LINES) -> str:\n    return repo.git.diff(f\"--unified={context_lines}\")\n\ndef git_diff_staged(repo: git.Repo, context_lines: int = DEFAULT_CONTEXT_LINES) -> str:\n    return repo.git.diff(f\"--unified={context_lines}\", \"--cached\")\n\ndef git_diff(repo: git.Repo, target: str, context_lines: int = DEFAULT_CONTEXT_LINES) -> str:\n    # Defense in depth: reject targets starting with '-' to prevent flag injection,\n    # even if a malicious ref with that name exists (e.g. 
via filesystem manipulation)\n    if target.startswith(\"-\"):\n        raise BadName(f\"Invalid target: '{target}' - cannot start with '-'\")\n    repo.rev_parse(target)  # Validates target is a real git ref, throws BadName if not\n    return repo.git.diff(f\"--unified={context_lines}\", target)\n\ndef git_commit(repo: git.Repo, message: str) -> str:\n    commit = repo.index.commit(message)\n    return f\"Changes committed successfully with hash {commit.hexsha}\"\n\ndef git_add(repo: git.Repo, files: list[str]) -> str:\n    if files == [\".\"]:\n        repo.git.add(\".\")\n    else:\n        # Use '--' to prevent files starting with '-' from being interpreted as options\n        repo.git.add(\"--\", *files)\n    return \"Files staged successfully\"\n\ndef git_reset(repo: git.Repo) -> str:\n    repo.index.reset()\n    return \"All staged changes reset\"\n\ndef git_log(repo: git.Repo, max_count: int = 10, start_timestamp: Optional[str] = None, end_timestamp: Optional[str] = None) -> list[str]:\n    if start_timestamp or end_timestamp:\n        # Defense in depth: reject timestamps starting with '-' to prevent flag injection\n        if start_timestamp and start_timestamp.startswith(\"-\"):\n            raise ValueError(f\"Invalid start_timestamp: '{start_timestamp}' - cannot start with '-'\")\n        if end_timestamp and end_timestamp.startswith(\"-\"):\n            raise ValueError(f\"Invalid end_timestamp: '{end_timestamp}' - cannot start with '-'\")\n        # Use git log command with date filtering\n        args = []\n        if start_timestamp:\n            args.extend(['--since', start_timestamp])\n        if end_timestamp:\n            args.extend(['--until', end_timestamp])\n        args.extend(['--format=%H%n%an%n%ad%n%s%n'])\n\n        log_output = repo.git.log(*args).split('\\n')\n\n        log = []\n        # Process commits in groups of 4 (hash, author, date, message)\n        for i in range(0, len(log_output), 4):\n            if i + 3 < 
len(log_output) and len(log) < max_count:\n                log.append(\n                    f\"Commit: {log_output[i]}\\n\"\n                    f\"Author: {log_output[i+1]}\\n\"\n                    f\"Date: {log_output[i+2]}\\n\"\n                    f\"Message: {log_output[i+3]}\\n\"\n                )\n        return log\n    else:\n        # Use existing logic for simple log without date filtering\n        commits = list(repo.iter_commits(max_count=max_count))\n        log = []\n        for commit in commits:\n            log.append(\n                f\"Commit: {commit.hexsha!r}\\n\"\n                f\"Author: {commit.author!r}\\n\"\n                f\"Date: {commit.authored_datetime}\\n\"\n                f\"Message: {commit.message!r}\\n\"\n            )\n        return log\n\ndef git_create_branch(repo: git.Repo, branch_name: str, base_branch: str | None = None) -> str:\n    # Defense in depth: reject names starting with '-' to prevent flag injection\n    if branch_name.startswith(\"-\"):\n        raise BadName(f\"Invalid branch name: '{branch_name}' - cannot start with '-'\")\n    if base_branch and base_branch.startswith(\"-\"):\n        raise BadName(f\"Invalid base branch: '{base_branch}' - cannot start with '-'\")\n    if base_branch:\n        base = repo.references[base_branch]\n    else:\n        base = repo.active_branch\n\n    repo.create_head(branch_name, base)\n    return f\"Created branch '{branch_name}' from '{base.name}'\"\n\ndef git_checkout(repo: git.Repo, branch_name: str) -> str:\n    # Defense in depth: reject branch names starting with '-' to prevent flag injection,\n    # even if a malicious ref with that name exists (e.g. 
via filesystem manipulation)\n    if branch_name.startswith(\"-\"):\n        raise BadName(f\"Invalid branch name: '{branch_name}' - cannot start with '-'\")\n    repo.rev_parse(branch_name)  # Validates branch_name is a real git ref, throws BadName if not\n    repo.git.checkout(branch_name)\n    return f\"Switched to branch '{branch_name}'\"\n\n\n\ndef git_show(repo: git.Repo, revision: str) -> str:\n    # Defense in depth: reject revisions starting with '-' to prevent flag injection,\n    # even if a malicious ref with that name exists (e.g. via filesystem manipulation)\n    if revision.startswith(\"-\"):\n        raise BadName(f\"Invalid revision: '{revision}' - cannot start with '-'\")\n    commit = repo.commit(revision)\n    output = [\n        f\"Commit: {commit.hexsha!r}\\n\"\n        f\"Author: {commit.author!r}\\n\"\n        f\"Date: {commit.authored_datetime!r}\\n\"\n        f\"Message: {commit.message!r}\\n\"\n    ]\n    if commit.parents:\n        parent = commit.parents[0]\n        diff = parent.diff(commit, create_patch=True)\n    else:\n        diff = commit.diff(git.NULL_TREE, create_patch=True)\n    for d in diff:\n        output.append(f\"\\n--- {d.a_path}\\n+++ {d.b_path}\\n\")\n        if d.diff is None:\n            continue\n        if isinstance(d.diff, bytes):\n            output.append(d.diff.decode('utf-8'))\n        else:\n            output.append(d.diff)\n    return \"\".join(output)\n\ndef validate_repo_path(repo_path: Path, allowed_repository: Path | None) -> None:\n    \"\"\"Validate that repo_path is within the allowed repository path.\"\"\"\n    if allowed_repository is None:\n        return  # No restriction configured\n\n    # Resolve both paths to handle symlinks and relative paths\n    try:\n        resolved_repo = repo_path.resolve()\n        resolved_allowed = allowed_repository.resolve()\n    except (OSError, RuntimeError):\n        raise ValueError(f\"Invalid path: {repo_path}\")\n\n    # Check if repo_path is the same as 
or a subdirectory of allowed_repository\n    try:\n        resolved_repo.relative_to(resolved_allowed)\n    except ValueError:\n        raise ValueError(\n            f\"Repository path '{repo_path}' is outside the allowed repository '{allowed_repository}'\"\n        )\n\n\ndef git_branch(repo: git.Repo, branch_type: str, contains: str | None = None, not_contains: str | None = None) -> str:\n    # Defense in depth: reject values starting with '-' to prevent flag injection\n    if contains and contains.startswith(\"-\"):\n        raise BadName(f\"Invalid contains value: '{contains}' - cannot start with '-'\")\n    if not_contains and not_contains.startswith(\"-\"):\n        raise BadName(f\"Invalid not_contains value: '{not_contains}' - cannot start with '-'\")\n\n    match contains:\n        case None:\n            contains_sha = (None,)\n        case _:\n            contains_sha = (\"--contains\", contains)\n\n    match not_contains:\n        case None:\n            not_contains_sha = (None,)\n        case _:\n            not_contains_sha = (\"--no-contains\", not_contains)\n\n    match branch_type:\n        case 'local':\n            b_type = None\n        case 'remote':\n            b_type = \"-r\"\n        case 'all':\n            b_type = \"-a\"\n        case _:\n            return f\"Invalid branch type: {branch_type}\"\n\n    # None value will be auto deleted by GitPython\n    branch_info = repo.git.branch(b_type, *contains_sha, *not_contains_sha)\n\n    return branch_info\n\n\nasync def serve(repository: Path | None) -> None:\n    logger = logging.getLogger(__name__)\n\n    if repository is not None:\n        try:\n            git.Repo(repository)\n            logger.info(f\"Using repository at {repository}\")\n        except git.InvalidGitRepositoryError:\n            logger.error(f\"{repository} is not a valid Git repository\")\n            return\n\n    server = Server(\"mcp-git\")\n\n    @server.list_tools()\n    async def list_tools() -> list[Tool]:\n  
      return [\n            Tool(\n                name=GitTools.STATUS,\n                description=\"Shows the working tree status\",\n                inputSchema=GitStatus.model_json_schema(),\n                annotations=ToolAnnotations(\n                    readOnlyHint=True,\n                    destructiveHint=False,\n                    idempotentHint=True,\n                    openWorldHint=False,\n                ),\n            ),\n            Tool(\n                name=GitTools.DIFF_UNSTAGED,\n                description=\"Shows changes in the working directory that are not yet staged\",\n                inputSchema=GitDiffUnstaged.model_json_schema(),\n                annotations=ToolAnnotations(\n                    readOnlyHint=True,\n                    destructiveHint=False,\n                    idempotentHint=True,\n                    openWorldHint=False,\n                ),\n            ),\n            Tool(\n                name=GitTools.DIFF_STAGED,\n                description=\"Shows changes that are staged for commit\",\n                inputSchema=GitDiffStaged.model_json_schema(),\n                annotations=ToolAnnotations(\n                    readOnlyHint=True,\n                    destructiveHint=False,\n                    idempotentHint=True,\n                    openWorldHint=False,\n                ),\n            ),\n            Tool(\n                name=GitTools.DIFF,\n                description=\"Shows differences between branches or commits\",\n                inputSchema=GitDiff.model_json_schema(),\n                annotations=ToolAnnotations(\n                    readOnlyHint=True,\n                    destructiveHint=False,\n                    idempotentHint=True,\n                    openWorldHint=False,\n                ),\n            ),\n            Tool(\n                name=GitTools.COMMIT,\n                description=\"Records changes to the repository\",\n                
inputSchema=GitCommit.model_json_schema(),\n                annotations=ToolAnnotations(\n                    readOnlyHint=False,\n                    destructiveHint=False,\n                    idempotentHint=False,\n                    openWorldHint=False,\n                ),\n            ),\n            Tool(\n                name=GitTools.ADD,\n                description=\"Adds file contents to the staging area\",\n                inputSchema=GitAdd.model_json_schema(),\n                annotations=ToolAnnotations(\n                    readOnlyHint=False,\n                    destructiveHint=False,\n                    idempotentHint=True,\n                    openWorldHint=False,\n                ),\n            ),\n            Tool(\n                name=GitTools.RESET,\n                description=\"Unstages all staged changes\",\n                inputSchema=GitReset.model_json_schema(),\n                annotations=ToolAnnotations(\n                    readOnlyHint=False,\n                    destructiveHint=True,\n                    idempotentHint=True,\n                    openWorldHint=False,\n                ),\n            ),\n            Tool(\n                name=GitTools.LOG,\n                description=\"Shows the commit logs\",\n                inputSchema=GitLog.model_json_schema(),\n                annotations=ToolAnnotations(\n                    readOnlyHint=True,\n                    destructiveHint=False,\n                    idempotentHint=True,\n                    openWorldHint=False,\n                ),\n            ),\n            Tool(\n                name=GitTools.CREATE_BRANCH,\n                description=\"Creates a new branch from an optional base branch\",\n                inputSchema=GitCreateBranch.model_json_schema(),\n                annotations=ToolAnnotations(\n                    readOnlyHint=False,\n                    destructiveHint=False,\n                    idempotentHint=False,\n                    
openWorldHint=False,\n                ),\n            ),\n            Tool(\n                name=GitTools.CHECKOUT,\n                description=\"Switches branches\",\n                inputSchema=GitCheckout.model_json_schema(),\n                annotations=ToolAnnotations(\n                    readOnlyHint=False,\n                    destructiveHint=False,\n                    idempotentHint=False,\n                    openWorldHint=False,\n                ),\n            ),\n            Tool(\n                name=GitTools.SHOW,\n                description=\"Shows the contents of a commit\",\n                inputSchema=GitShow.model_json_schema(),\n                annotations=ToolAnnotations(\n                    readOnlyHint=True,\n                    destructiveHint=False,\n                    idempotentHint=True,\n                    openWorldHint=False,\n                ),\n            ),\n            Tool(\n                name=GitTools.BRANCH,\n                description=\"List Git branches\",\n                inputSchema=GitBranch.model_json_schema(),\n                annotations=ToolAnnotations(\n                    readOnlyHint=True,\n                    destructiveHint=False,\n                    idempotentHint=True,\n                    openWorldHint=False,\n                ),\n            )\n        ]\n\n    async def list_repos() -> Sequence[str]:\n        async def by_roots() -> Sequence[str]:\n            if not isinstance(server.request_context.session, ServerSession):\n                raise TypeError(\"server.request_context.session must be a ServerSession\")\n\n            if not server.request_context.session.check_client_capability(\n                ClientCapabilities(roots=RootsCapability())\n            ):\n                return []\n\n            roots_result: ListRootsResult = await server.request_context.session.list_roots()\n            logger.debug(f\"Roots result: {roots_result}\")\n            repo_paths = []\n            for 
root in roots_result.roots:\n                path = root.uri.path\n                try:\n                    git.Repo(path)\n                    repo_paths.append(str(path))\n                except git.InvalidGitRepositoryError:\n                    pass\n            return repo_paths\n\n        def by_commandline() -> Sequence[str]:\n            return [str(repository)] if repository is not None else []\n\n        cmd_repos = by_commandline()\n        root_repos = await by_roots()\n        return [*root_repos, *cmd_repos]\n\n    @server.call_tool()\n    async def call_tool(name: str, arguments: dict) -> list[TextContent]:\n        repo_path = Path(arguments[\"repo_path\"])\n\n        # Validate repo_path is within allowed repository\n        validate_repo_path(repo_path, repository)\n\n        # For all commands, we need an existing repo\n        repo = git.Repo(repo_path)\n\n        match name:\n            case GitTools.STATUS:\n                status = git_status(repo)\n                return [TextContent(\n                    type=\"text\",\n                    text=f\"Repository status:\\n{status}\"\n                )]\n\n            case GitTools.DIFF_UNSTAGED:\n                diff = git_diff_unstaged(repo, arguments.get(\"context_lines\", DEFAULT_CONTEXT_LINES))\n                return [TextContent(\n                    type=\"text\",\n                    text=f\"Unstaged changes:\\n{diff}\"\n                )]\n\n            case GitTools.DIFF_STAGED:\n                diff = git_diff_staged(repo, arguments.get(\"context_lines\", DEFAULT_CONTEXT_LINES))\n                return [TextContent(\n                    type=\"text\",\n                    text=f\"Staged changes:\\n{diff}\"\n                )]\n\n            case GitTools.DIFF:\n                diff = git_diff(repo, arguments[\"target\"], arguments.get(\"context_lines\", DEFAULT_CONTEXT_LINES))\n                return [TextContent(\n                    type=\"text\",\n                    
text=f\"Diff with {arguments['target']}:\\n{diff}\"\n                )]\n\n            case GitTools.COMMIT:\n                result = git_commit(repo, arguments[\"message\"])\n                return [TextContent(\n                    type=\"text\",\n                    text=result\n                )]\n\n            case GitTools.ADD:\n                result = git_add(repo, arguments[\"files\"])\n                return [TextContent(\n                    type=\"text\",\n                    text=result\n                )]\n\n            case GitTools.RESET:\n                result = git_reset(repo)\n                return [TextContent(\n                    type=\"text\",\n                    text=result\n                )]\n\n            # Update the LOG case:\n            case GitTools.LOG:\n                log = git_log(\n                    repo,\n                    arguments.get(\"max_count\", 10),\n                    arguments.get(\"start_timestamp\"),\n                    arguments.get(\"end_timestamp\")\n                )\n                return [TextContent(\n                    type=\"text\",\n                    text=\"Commit history:\\n\" + \"\\n\".join(log)\n                )]\n\n            case GitTools.CREATE_BRANCH:\n                result = git_create_branch(\n                    repo,\n                    arguments[\"branch_name\"],\n                    arguments.get(\"base_branch\")\n                )\n                return [TextContent(\n                    type=\"text\",\n                    text=result\n                )]\n\n            case GitTools.CHECKOUT:\n                result = git_checkout(repo, arguments[\"branch_name\"])\n                return [TextContent(\n                    type=\"text\",\n                    text=result\n                )]\n\n            case GitTools.SHOW:\n                result = git_show(repo, arguments[\"revision\"])\n                return [TextContent(\n                    type=\"text\",\n              
      text=result\n                )]\n\n            case GitTools.BRANCH:\n                result = git_branch(\n                    repo,\n                    arguments.get(\"branch_type\", 'local'),\n                    arguments.get(\"contains\", None),\n                    arguments.get(\"not_contains\", None),\n                )\n                return [TextContent(\n                    type=\"text\",\n                    text=result\n                )]\n\n            case _:\n                raise ValueError(f\"Unknown tool: {name}\")\n\n    options = server.create_initialization_options()\n    async with stdio_server() as (read_stream, write_stream):\n        await server.run(read_stream, write_stream, options, raise_exceptions=True)\n"
  },
  {
    "path": "src/git/tests/test_server.py",
    "content": "import pytest\nfrom pathlib import Path\nimport git\nfrom git.exc import BadName\nfrom mcp_server_git.server import (\n    git_checkout,\n    git_branch,\n    git_add,\n    git_status,\n    git_diff_unstaged,\n    git_diff_staged,\n    git_diff,\n    git_commit,\n    git_reset,\n    git_log,\n    git_create_branch,\n    git_show,\n    validate_repo_path,\n)\nimport shutil\n\n@pytest.fixture\ndef test_repository(tmp_path: Path):\n    repo_path = tmp_path / \"temp_test_repo\"\n    test_repo = git.Repo.init(repo_path)\n\n    Path(repo_path / \"test.txt\").write_text(\"test\")\n    test_repo.index.add([\"test.txt\"])\n    test_repo.index.commit(\"initial commit\")\n\n    yield test_repo\n\n    shutil.rmtree(repo_path)\n\ndef test_git_checkout_existing_branch(test_repository):\n    test_repository.git.branch(\"test-branch\")\n    result = git_checkout(test_repository, \"test-branch\")\n\n    assert \"Switched to branch 'test-branch'\" in result\n    assert test_repository.active_branch.name == \"test-branch\"\n\ndef test_git_checkout_nonexistent_branch(test_repository):\n\n    with pytest.raises(BadName):\n        git_checkout(test_repository, \"nonexistent-branch\")\n\ndef test_git_branch_local(test_repository):\n    test_repository.git.branch(\"new-branch-local\")\n    result = git_branch(test_repository, \"local\")\n    assert \"new-branch-local\" in result\n\ndef test_git_branch_remote(test_repository):\n    result = git_branch(test_repository, \"remote\")\n    assert \"\" == result.strip()  # Should be empty if no remote branches\n\ndef test_git_branch_all(test_repository):\n    test_repository.git.branch(\"new-branch-all\")\n    result = git_branch(test_repository, \"all\")\n    assert \"new-branch-all\" in result\n\ndef test_git_branch_contains(test_repository):\n    # Get the default branch name (could be \"main\" or \"master\")\n    default_branch = test_repository.active_branch.name\n    # Create a new branch and commit to it\n    
test_repository.git.checkout(\"-b\", \"feature-branch\")\n    Path(test_repository.working_dir / Path(\"feature.txt\")).write_text(\"feature content\")\n    test_repository.index.add([\"feature.txt\"])\n    commit = test_repository.index.commit(\"feature commit\")\n    test_repository.git.checkout(default_branch)\n\n    result = git_branch(test_repository, \"local\", contains=commit.hexsha)\n    assert \"feature-branch\" in result\n    assert default_branch not in result\n\ndef test_git_branch_not_contains(test_repository):\n    # Get the default branch name (could be \"main\" or \"master\")\n    default_branch = test_repository.active_branch.name\n    # Create a new branch and commit to it\n    test_repository.git.checkout(\"-b\", \"another-feature-branch\")\n    Path(test_repository.working_dir / Path(\"another_feature.txt\")).write_text(\"another feature content\")\n    test_repository.index.add([\"another_feature.txt\"])\n    commit = test_repository.index.commit(\"another feature commit\")\n    test_repository.git.checkout(default_branch)\n\n    result = git_branch(test_repository, \"local\", not_contains=commit.hexsha)\n    assert \"another-feature-branch\" not in result\n    assert default_branch in result\n\ndef test_git_add_all_files(test_repository):\n    file_path = Path(test_repository.working_dir) / \"all_file.txt\"\n    file_path.write_text(\"adding all\")\n\n    result = git_add(test_repository, [\".\"])\n\n    staged_files = [item.a_path for item in test_repository.index.diff(\"HEAD\")]\n    assert \"all_file.txt\" in staged_files\n    assert result == \"Files staged successfully\"\n\ndef test_git_add_specific_files(test_repository):\n    file1 = Path(test_repository.working_dir) / \"file1.txt\"\n    file2 = Path(test_repository.working_dir) / \"file2.txt\"\n    file1.write_text(\"file 1 content\")\n    file2.write_text(\"file 2 content\")\n\n    result = git_add(test_repository, [\"file1.txt\"])\n\n    staged_files = [item.a_path for item in 
test_repository.index.diff(\"HEAD\")]\n    assert \"file1.txt\" in staged_files\n    assert \"file2.txt\" not in staged_files\n    assert result == \"Files staged successfully\"\n\ndef test_git_status(test_repository):\n    result = git_status(test_repository)\n\n    assert result is not None\n    assert \"On branch\" in result or \"branch\" in result.lower()\n\ndef test_git_diff_unstaged(test_repository):\n    file_path = Path(test_repository.working_dir) / \"test.txt\"\n    file_path.write_text(\"modified content\")\n\n    result = git_diff_unstaged(test_repository)\n\n    assert \"test.txt\" in result\n    assert \"modified content\" in result\n\ndef test_git_diff_unstaged_empty(test_repository):\n    result = git_diff_unstaged(test_repository)\n\n    assert result == \"\"\n\ndef test_git_diff_staged(test_repository):\n    file_path = Path(test_repository.working_dir) / \"staged_file.txt\"\n    file_path.write_text(\"staged content\")\n    test_repository.index.add([\"staged_file.txt\"])\n\n    result = git_diff_staged(test_repository)\n\n    assert \"staged_file.txt\" in result\n    assert \"staged content\" in result\n\ndef test_git_diff_staged_empty(test_repository):\n    result = git_diff_staged(test_repository)\n\n    assert result == \"\"\n\ndef test_git_diff(test_repository):\n    # Get the default branch name (could be \"main\" or \"master\")\n    default_branch = test_repository.active_branch.name\n    test_repository.git.checkout(\"-b\", \"feature-diff\")\n    file_path = Path(test_repository.working_dir) / \"test.txt\"\n    file_path.write_text(\"feature changes\")\n    test_repository.index.add([\"test.txt\"])\n    test_repository.index.commit(\"feature commit\")\n\n    result = git_diff(test_repository, default_branch)\n\n    assert \"test.txt\" in result\n    assert \"feature changes\" in result\n\ndef test_git_commit(test_repository):\n    file_path = Path(test_repository.working_dir) / \"commit_test.txt\"\n    file_path.write_text(\"content to 
commit\")\n    test_repository.index.add([\"commit_test.txt\"])\n\n    result = git_commit(test_repository, \"test commit message\")\n\n    assert \"Changes committed successfully with hash\" in result\n\n    latest_commit = test_repository.head.commit\n    assert latest_commit.message.strip() == \"test commit message\"\n\ndef test_git_reset(test_repository):\n    file_path = Path(test_repository.working_dir) / \"reset_test.txt\"\n    file_path.write_text(\"content to reset\")\n    test_repository.index.add([\"reset_test.txt\"])\n\n    staged_before = [item.a_path for item in test_repository.index.diff(\"HEAD\")]\n    assert \"reset_test.txt\" in staged_before\n\n    result = git_reset(test_repository)\n\n    assert result == \"All staged changes reset\"\n\n    staged_after = [item.a_path for item in test_repository.index.diff(\"HEAD\")]\n    assert \"reset_test.txt\" not in staged_after\n\ndef test_git_log(test_repository):\n    for i in range(3):\n        file_path = Path(test_repository.working_dir) / f\"log_test_{i}.txt\"\n        file_path.write_text(f\"content {i}\")\n        test_repository.index.add([f\"log_test_{i}.txt\"])\n        test_repository.index.commit(f\"commit {i}\")\n\n    result = git_log(test_repository, max_count=2)\n\n    assert isinstance(result, list)\n    assert len(result) == 2\n    assert \"Commit:\" in result[0]\n    assert \"Author:\" in result[0]\n    assert \"Date:\" in result[0]\n    assert \"Message:\" in result[0]\n\ndef test_git_log_default(test_repository):\n    result = git_log(test_repository)\n\n    assert isinstance(result, list)\n    assert len(result) >= 1\n    assert \"initial commit\" in result[0]\n\ndef test_git_create_branch(test_repository):\n    result = git_create_branch(test_repository, \"new-feature-branch\")\n\n    assert \"Created branch 'new-feature-branch'\" in result\n\n    branches = [ref.name for ref in test_repository.references]\n    assert \"new-feature-branch\" in branches\n\ndef 
test_git_create_branch_from_base(test_repository):\n    test_repository.git.checkout(\"-b\", \"base-branch\")\n    file_path = Path(test_repository.working_dir) / \"base.txt\"\n    file_path.write_text(\"base content\")\n    test_repository.index.add([\"base.txt\"])\n    test_repository.index.commit(\"base commit\")\n\n    result = git_create_branch(test_repository, \"derived-branch\", \"base-branch\")\n\n    assert \"Created branch 'derived-branch' from 'base-branch'\" in result\n\ndef test_git_show(test_repository):\n    file_path = Path(test_repository.working_dir) / \"show_test.txt\"\n    file_path.write_text(\"show content\")\n    test_repository.index.add([\"show_test.txt\"])\n    test_repository.index.commit(\"show test commit\")\n\n    commit_sha = test_repository.head.commit.hexsha\n\n    result = git_show(test_repository, commit_sha)\n\n    assert \"Commit:\" in result\n    assert \"Author:\" in result\n    assert \"show test commit\" in result\n    assert \"show_test.txt\" in result\n\ndef test_git_show_initial_commit(test_repository):\n    initial_commit = list(test_repository.iter_commits())[-1]\n\n    result = git_show(test_repository, initial_commit.hexsha)\n\n    assert \"Commit:\" in result\n    assert \"initial commit\" in result\n    assert \"test.txt\" in result\n\n\n# Tests for validate_repo_path (repository scoping security fix)\n\ndef test_validate_repo_path_no_restriction():\n    \"\"\"When no repository restriction is configured, any path should be allowed.\"\"\"\n    validate_repo_path(Path(\"/any/path\"), None)  # Should not raise\n\n\ndef test_validate_repo_path_exact_match(tmp_path: Path):\n    \"\"\"When repo_path exactly matches allowed_repository, validation should pass.\"\"\"\n    allowed = tmp_path / \"repo\"\n    allowed.mkdir()\n    validate_repo_path(allowed, allowed)  # Should not raise\n\n\ndef test_validate_repo_path_subdirectory(tmp_path: Path):\n    \"\"\"When repo_path is a subdirectory of allowed_repository, validation 
should pass.\"\"\"\n    allowed = tmp_path / \"repo\"\n    allowed.mkdir()\n    subdir = allowed / \"subdir\"\n    subdir.mkdir()\n    validate_repo_path(subdir, allowed)  # Should not raise\n\n\ndef test_validate_repo_path_outside_allowed(tmp_path: Path):\n    \"\"\"When repo_path is outside allowed_repository, validation should raise ValueError.\"\"\"\n    allowed = tmp_path / \"allowed_repo\"\n    allowed.mkdir()\n    outside = tmp_path / \"other_repo\"\n    outside.mkdir()\n\n    with pytest.raises(ValueError) as exc_info:\n        validate_repo_path(outside, allowed)\n    assert \"outside the allowed repository\" in str(exc_info.value)\n\n\ndef test_validate_repo_path_traversal_attempt(tmp_path: Path):\n    \"\"\"Path traversal attempts (../) should be caught and rejected.\"\"\"\n    allowed = tmp_path / \"allowed_repo\"\n    allowed.mkdir()\n    # Attempt to escape via ../\n    traversal_path = allowed / \"..\" / \"other_repo\"\n\n    with pytest.raises(ValueError) as exc_info:\n        validate_repo_path(traversal_path, allowed)\n    assert \"outside the allowed repository\" in str(exc_info.value)\n\n\ndef test_validate_repo_path_symlink_escape(tmp_path: Path):\n    \"\"\"Symlinks pointing outside allowed_repository should be rejected.\"\"\"\n    allowed = tmp_path / \"allowed_repo\"\n    allowed.mkdir()\n    outside = tmp_path / \"outside\"\n    outside.mkdir()\n\n    # Create a symlink inside allowed that points outside\n    symlink = allowed / \"escape_link\"\n    symlink.symlink_to(outside)\n\n    with pytest.raises(ValueError) as exc_info:\n        validate_repo_path(symlink, allowed)\n    assert \"outside the allowed repository\" in str(exc_info.value)\n# Tests for argument injection protection\n\ndef test_git_diff_rejects_flag_injection(test_repository):\n    \"\"\"git_diff should reject flags that could be used for argument injection.\"\"\"\n    with pytest.raises(BadName):\n        git_diff(test_repository, \"--output=/tmp/evil\")\n\n    with 
pytest.raises(BadName):\n        git_diff(test_repository, \"--help\")\n\n    with pytest.raises(BadName):\n        git_diff(test_repository, \"-p\")\n\n\ndef test_git_checkout_rejects_flag_injection(test_repository):\n    \"\"\"git_checkout should reject flags that could be used for argument injection.\"\"\"\n    with pytest.raises(BadName):\n        git_checkout(test_repository, \"--help\")\n\n    with pytest.raises(BadName):\n        git_checkout(test_repository, \"--orphan=evil\")\n\n    with pytest.raises(BadName):\n        git_checkout(test_repository, \"-f\")\n\n\ndef test_git_diff_allows_valid_refs(test_repository):\n    \"\"\"git_diff should work normally with valid git refs.\"\"\"\n    # Get the default branch name\n    default_branch = test_repository.active_branch.name\n\n    # Create a branch with a commit for diffing\n    test_repository.git.checkout(\"-b\", \"valid-diff-branch\")\n    file_path = Path(test_repository.working_dir) / \"test.txt\"\n    file_path.write_text(\"valid diff content\")\n    test_repository.index.add([\"test.txt\"])\n    test_repository.index.commit(\"valid diff commit\")\n\n    # Test with branch name\n    result = git_diff(test_repository, default_branch)\n    assert \"test.txt\" in result\n\n    # Test with HEAD~1\n    result = git_diff(test_repository, \"HEAD~1\")\n    assert \"test.txt\" in result\n\n    # Test with commit hash\n    commit_sha = test_repository.head.commit.hexsha\n    result = git_diff(test_repository, commit_sha)\n    assert result is not None\n\n\ndef test_git_checkout_allows_valid_branches(test_repository):\n    \"\"\"git_checkout should work normally with valid branch names.\"\"\"\n    # Get the default branch name\n    default_branch = test_repository.active_branch.name\n\n    # Create a branch to checkout\n    test_repository.git.branch(\"valid-checkout-branch\")\n\n    result = git_checkout(test_repository, \"valid-checkout-branch\")\n    assert \"Switched to branch 'valid-checkout-branch'\" in 
result\n    assert test_repository.active_branch.name == \"valid-checkout-branch\"\n\n    # Checkout back to default branch\n    result = git_checkout(test_repository, default_branch)\n    assert \"Switched to branch\" in result\n    assert test_repository.active_branch.name == default_branch\n\n\ndef test_git_diff_rejects_malicious_refs(test_repository):\n    \"\"\"git_diff should reject refs starting with '-' even if they exist.\n\n    This tests defense in depth against an attacker who creates malicious\n    refs via filesystem manipulation (e.g. using mcp-filesystem to write\n    to .git/refs/heads/--output=...).\n    \"\"\"\n    import os\n\n    # Manually create a malicious ref by writing directly to .git/refs\n    sha = test_repository.head.commit.hexsha\n    refs_dir = Path(test_repository.git_dir) / \"refs\" / \"heads\"\n    malicious_ref_path = refs_dir / \"--output=evil.txt\"\n    malicious_ref_path.write_text(sha)\n\n    # Even though the ref exists, it should be rejected\n    with pytest.raises(BadName):\n        git_diff(test_repository, \"--output=evil.txt\")\n\n    # Verify no file was created (the attack was blocked)\n    assert not os.path.exists(\"evil.txt\")\n\n    # Cleanup\n    malicious_ref_path.unlink()\n\n\ndef test_git_checkout_rejects_malicious_refs(test_repository):\n    \"\"\"git_checkout should reject refs starting with '-' even if they exist.\"\"\"\n    # Manually create a malicious ref\n    sha = test_repository.head.commit.hexsha\n    refs_dir = Path(test_repository.git_dir) / \"refs\" / \"heads\"\n    malicious_ref_path = refs_dir / \"--orphan=evil\"\n    malicious_ref_path.write_text(sha)\n\n    # Even though the ref exists, it should be rejected\n    with pytest.raises(BadName):\n        git_checkout(test_repository, \"--orphan=evil\")\n\n    # Cleanup\n    malicious_ref_path.unlink()\n\n\n# Tests for argument injection protection in git_show, git_create_branch,\n# git_log, and git_branch — matching the existing guards on 
git_diff and\n# git_checkout.\n\ndef test_git_show_rejects_flag_injection(test_repository):\n    \"\"\"git_show should reject revisions starting with '-'.\"\"\"\n    with pytest.raises(BadName):\n        git_show(test_repository, \"--output=/tmp/evil\")\n\n    with pytest.raises(BadName):\n        git_show(test_repository, \"-p\")\n\n\ndef test_git_show_rejects_malicious_refs(test_repository):\n    \"\"\"git_show should reject refs starting with '-' even if they exist.\"\"\"\n    sha = test_repository.head.commit.hexsha\n    refs_dir = Path(test_repository.git_dir) / \"refs\" / \"heads\"\n    malicious_ref_path = refs_dir / \"--format=evil\"\n    malicious_ref_path.write_text(sha)\n\n    with pytest.raises(BadName):\n        git_show(test_repository, \"--format=evil\")\n\n    malicious_ref_path.unlink()\n\n\ndef test_git_create_branch_rejects_flag_injection(test_repository):\n    \"\"\"git_create_branch should reject branch names starting with '-'.\"\"\"\n    with pytest.raises(BadName):\n        git_create_branch(test_repository, \"--track=evil\")\n\n    with pytest.raises(BadName):\n        git_create_branch(test_repository, \"-f\")\n\n\ndef test_git_create_branch_rejects_base_branch_flag_injection(test_repository):\n    \"\"\"git_create_branch should reject base branch names starting with '-'.\"\"\"\n    with pytest.raises(BadName):\n        git_create_branch(test_repository, \"new-branch\", \"--track=evil\")\n\n\ndef test_git_log_rejects_timestamp_flag_injection(test_repository):\n    \"\"\"git_log should reject timestamps starting with '-'.\"\"\"\n    with pytest.raises(ValueError):\n        git_log(test_repository, start_timestamp=\"--exec=evil\")\n\n    with pytest.raises(ValueError):\n        git_log(test_repository, end_timestamp=\"--exec=evil\")\n\n\ndef test_git_branch_rejects_contains_flag_injection(test_repository):\n    \"\"\"git_branch should reject contains/not_contains values starting with '-'.\"\"\"\n    with pytest.raises(BadName):\n        
git_branch(test_repository, \"local\", contains=\"--exec=evil\")\n\n    with pytest.raises(BadName):\n        git_branch(test_repository, \"local\", not_contains=\"--exec=evil\")\n"
  },
  {
    "path": "src/memory/Dockerfile",
    "content": "FROM node:22.12-alpine AS builder\n\nCOPY src/memory /app\nCOPY tsconfig.json /tsconfig.json\n\nWORKDIR /app\n\nRUN --mount=type=cache,target=/root/.npm npm install\n\nRUN --mount=type=cache,target=/root/.npm-production npm ci --ignore-scripts --omit=dev\n\nFROM node:22-alpine AS release\n\nCOPY --from=builder /app/dist /app/dist\nCOPY --from=builder /app/package.json /app/package.json\nCOPY --from=builder /app/package-lock.json /app/package-lock.json\n\nENV NODE_ENV=production\n\nWORKDIR /app\n\nRUN npm ci --ignore-scripts --omit=dev\n\nENTRYPOINT [\"node\", \"dist/index.js\"]"
  },
  {
    "path": "src/memory/README.md",
    "content": "# Knowledge Graph Memory Server\n\nA basic implementation of persistent memory using a local knowledge graph. This lets Claude remember information about the user across chats.\n\n## Core Concepts\n\n### Entities\nEntities are the primary nodes in the knowledge graph. Each entity has:\n- A unique name (identifier)\n- An entity type (e.g., \"person\", \"organization\", \"event\")\n- A list of observations\n\nExample:\n```json\n{\n  \"name\": \"John_Smith\",\n  \"entityType\": \"person\",\n  \"observations\": [\"Speaks fluent Spanish\"]\n}\n```\n\n### Relations\nRelations define directed connections between entities. They are always stored in active voice and describe how entities interact or relate to each other.\n\nExample:\n```json\n{\n  \"from\": \"John_Smith\",\n  \"to\": \"Anthropic\",\n  \"relationType\": \"works_at\"\n}\n```\n### Observations\nObservations are discrete pieces of information about an entity. They are:\n\n- Stored as strings\n- Attached to specific entities\n- Can be added or removed independently\n- Should be atomic (one fact per observation)\n\nExample:\n```json\n{\n  \"entityName\": \"John_Smith\",\n  \"observations\": [\n    \"Speaks fluent Spanish\",\n    \"Graduated in 2019\",\n    \"Prefers morning meetings\"\n  ]\n}\n```\n\n## API\n\n### Tools\n- **create_entities**\n  - Create multiple new entities in the knowledge graph\n  - Input: `entities` (array of objects)\n    - Each object contains:\n      - `name` (string): Entity identifier\n      - `entityType` (string): Type classification\n      - `observations` (string[]): Associated observations\n  - Ignores entities with existing names\n\n- **create_relations**\n  - Create multiple new relations between entities\n  - Input: `relations` (array of objects)\n    - Each object contains:\n      - `from` (string): Source entity name\n      - `to` (string): Target entity name\n      - `relationType` (string): Relationship type in active voice\n  - Skips duplicate relations\n\n- 
**add_observations**\n  - Add new observations to existing entities\n  - Input: `observations` (array of objects)\n    - Each object contains:\n      - `entityName` (string): Target entity\n      - `contents` (string[]): New observations to add\n  - Returns added observations per entity\n  - Fails if entity doesn't exist\n\n- **delete_entities**\n  - Remove entities and their relations\n  - Input: `entityNames` (string[])\n  - Cascading deletion of associated relations\n  - Silent operation if entity doesn't exist\n\n- **delete_observations**\n  - Remove specific observations from entities\n  - Input: `deletions` (array of objects)\n    - Each object contains:\n      - `entityName` (string): Target entity\n      - `observations` (string[]): Observations to remove\n  - Silent operation if observation doesn't exist\n\n- **delete_relations**\n  - Remove specific relations from the graph\n  - Input: `relations` (array of objects)\n    - Each object contains:\n      - `from` (string): Source entity name\n      - `to` (string): Target entity name\n      - `relationType` (string): Relationship type\n  - Silent operation if relation doesn't exist\n\n- **read_graph**\n  - Read the entire knowledge graph\n  - No input required\n  - Returns complete graph structure with all entities and relations\n\n- **search_nodes**\n  - Search for nodes based on query\n  - Input: `query` (string)\n  - Searches across:\n    - Entity names\n    - Entity types\n    - Observation content\n  - Returns matching entities and their relations\n\n- **open_nodes**\n  - Retrieve specific nodes by name\n  - Input: `names` (string[])\n  - Returns:\n    - Requested entities\n    - Relations between requested entities\n  - Silently skips non-existent nodes\n\n# Usage with Claude Desktop\n\n### Setup\n\nAdd this to your claude_desktop_config.json:\n\n#### Docker\n\n```json\n{\n  \"mcpServers\": {\n    \"memory\": {\n      \"command\": \"docker\",\n      \"args\": [\"run\", \"-i\", \"-v\", 
\"claude-memory:/app/dist\", \"--rm\", \"mcp/memory\"]\n    }\n  }\n}\n```\n\n#### NPX\n```json\n{\n  \"mcpServers\": {\n    \"memory\": {\n      \"command\": \"npx\",\n      \"args\": [\n        \"-y\",\n        \"@modelcontextprotocol/server-memory\"\n      ]\n    }\n  }\n}\n```\n\n#### NPX with custom setting\n\nThe server can be configured using the following environment variables:\n\n```json\n{\n  \"mcpServers\": {\n    \"memory\": {\n      \"command\": \"npx\",\n      \"args\": [\n        \"-y\",\n        \"@modelcontextprotocol/server-memory\"\n      ],\n      \"env\": {\n        \"MEMORY_FILE_PATH\": \"/path/to/custom/memory.jsonl\"\n      }\n    }\n  }\n}\n```\n\n- `MEMORY_FILE_PATH`: Path to the memory storage JSONL file (default: `memory.jsonl` in the server directory)\n\n# VS Code Installation Instructions\n\nFor quick installation, use one of the one-click installation buttons below:\n\n[![Install with NPX in VS Code](https://img.shields.io/badge/VS_Code-NPM-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=memory&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40modelcontextprotocol%2Fserver-memory%22%5D%7D) [![Install with NPX in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-NPM-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=memory&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40modelcontextprotocol%2Fserver-memory%22%5D%7D&quality=insiders)\n\n[![Install with Docker in VS Code](https://img.shields.io/badge/VS_Code-Docker-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=memory&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22-v%22%2C%22claude-memory%3A%2Fapp%2Fdist%22%2C%22--rm%22%2C%22mcp%2Fmemory%22%5D%7D) [![Install with Docker in VS Code 
Insiders](https://img.shields.io/badge/VS_Code_Insiders-Docker-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=memory&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22-v%22%2C%22claude-memory%3A%2Fapp%2Fdist%22%2C%22--rm%22%2C%22mcp%2Fmemory%22%5D%7D&quality=insiders)\n\nFor manual installation, you can configure the MCP server using one of these methods:\n\n**Method 1: User Configuration (Recommended)**\nAdd the configuration to your user-level MCP configuration file. Open the Command Palette (`Ctrl + Shift + P`) and run `MCP: Open User Configuration`. This will open your user `mcp.json` file where you can add the server configuration.\n\n**Method 2: Workspace Configuration**\nAlternatively, you can add the configuration to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others.\n\n> For more details about MCP configuration in VS Code, see the [official VS Code MCP documentation](https://code.visualstudio.com/docs/copilot/customization/mcp-servers).\n\n#### NPX\n\n```json\n{\n  \"servers\": {\n    \"memory\": {\n      \"command\": \"npx\",\n      \"args\": [\n        \"-y\",\n        \"@modelcontextprotocol/server-memory\"\n      ]\n    }\n  }\n}\n```\n\n#### Docker\n\n```json\n{\n  \"servers\": {\n    \"memory\": {\n      \"command\": \"docker\",\n      \"args\": [\n        \"run\",\n        \"-i\",\n        \"-v\",\n        \"claude-memory:/app/dist\",\n        \"--rm\",\n        \"mcp/memory\"\n      ]\n    }\n  }\n}\n```\n\n### System Prompt\n\nThe prompt for utilizing memory depends on the use case. Changing the prompt will help the model determine the frequency and types of memories created.\n\nHere is an example prompt for chat personalization. You could use this prompt in the \"Custom Instructions\" field of a [Claude.ai Project](https://www.anthropic.com/news/projects). 
\n\n```\nFollow these steps for each interaction:\n\n1. User Identification:\n   - You should assume that you are interacting with default_user\n   - If you have not identified default_user, proactively try to do so.\n\n2. Memory Retrieval:\n   - Always begin your chat by saying only \"Remembering...\" and retrieve all relevant information from your knowledge graph\n   - Always refer to your knowledge graph as your \"memory\"\n\n3. Memory\n   - While conversing with the user, be attentive to any new information that falls into these categories:\n     a) Basic Identity (age, gender, location, job title, education level, etc.)\n     b) Behaviors (interests, habits, etc.)\n     c) Preferences (communication style, preferred language, etc.)\n     d) Goals (goals, targets, aspirations, etc.)\n     e) Relationships (personal and professional relationships up to 3 degrees of separation)\n\n4. Memory Update:\n   - If any new information was gathered during the interaction, update your memory as follows:\n     a) Create entities for recurring organizations, people, and significant events\n     b) Connect them to the current entities using relations\n     c) Store facts about them as observations\n```\n\n## Building\n\nDocker:\n\n```sh\ndocker build -t mcp/memory -f src/memory/Dockerfile . \n```\n\nFor Awareness: a prior mcp/memory volume contains an index.js file that could be overwritten by the new container. If you are using a docker volume for storage, delete the old docker volume's `index.js` file before starting the new container.\n\n## License\n\nThis MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository.\n"
  },
  {
    "path": "src/memory/__tests__/file-path.test.ts",
    "content": "import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';\nimport { promises as fs } from 'fs';\nimport path from 'path';\nimport { fileURLToPath } from 'url';\nimport { ensureMemoryFilePath, defaultMemoryPath } from '../index.js';\n\ndescribe('ensureMemoryFilePath', () => {\n  const testDir = path.dirname(fileURLToPath(import.meta.url));\n  const oldMemoryPath = path.join(testDir, '..', 'memory.json');\n  const newMemoryPath = path.join(testDir, '..', 'memory.jsonl');\n\n  let originalEnv: string | undefined;\n\n  beforeEach(() => {\n    // Save original environment variable\n    originalEnv = process.env.MEMORY_FILE_PATH;\n    // Delete environment variable\n    delete process.env.MEMORY_FILE_PATH;\n  });\n\n  afterEach(async () => {\n    // Restore original environment variable\n    if (originalEnv !== undefined) {\n      process.env.MEMORY_FILE_PATH = originalEnv;\n    } else {\n      delete process.env.MEMORY_FILE_PATH;\n    }\n\n    // Clean up test files\n    try {\n      await fs.unlink(oldMemoryPath);\n    } catch {\n      // Ignore if file doesn't exist\n    }\n    try {\n      await fs.unlink(newMemoryPath);\n    } catch {\n      // Ignore if file doesn't exist\n    }\n  });\n\n  describe('with MEMORY_FILE_PATH environment variable', () => {\n    it('should return absolute path when MEMORY_FILE_PATH is absolute', async () => {\n      const absolutePath = '/tmp/custom-memory.jsonl';\n      process.env.MEMORY_FILE_PATH = absolutePath;\n\n      const result = await ensureMemoryFilePath();\n\n      expect(result).toBe(absolutePath);\n    });\n\n    it('should convert relative path to absolute when MEMORY_FILE_PATH is relative', async () => {\n      const relativePath = 'custom-memory.jsonl';\n      process.env.MEMORY_FILE_PATH = relativePath;\n\n      const result = await ensureMemoryFilePath();\n\n      expect(path.isAbsolute(result)).toBe(true);\n      expect(result).toContain('custom-memory.jsonl');\n    });\n\n    
it('should handle Windows absolute paths', async () => {\n      const windowsPath = 'C:\\\\temp\\\\memory.jsonl';\n      process.env.MEMORY_FILE_PATH = windowsPath;\n\n      const result = await ensureMemoryFilePath();\n\n      // On Windows, should return as-is; on Unix, will be treated as relative\n      if (process.platform === 'win32') {\n        expect(result).toBe(windowsPath);\n      } else {\n        expect(path.isAbsolute(result)).toBe(true);\n      }\n    });\n  });\n\n  describe('without MEMORY_FILE_PATH environment variable', () => {\n    it('should return default path when no files exist', async () => {\n      const result = await ensureMemoryFilePath();\n\n      expect(result).toBe(defaultMemoryPath);\n    });\n\n    it('should migrate from memory.json to memory.jsonl when only old file exists', async () => {\n      // Create old memory.json file\n      await fs.writeFile(oldMemoryPath, '{\"test\":\"data\"}');\n\n      const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});\n\n      const result = await ensureMemoryFilePath();\n\n      expect(result).toBe(defaultMemoryPath);\n\n      // Verify migration happened\n      const newFileExists = await fs.access(newMemoryPath).then(() => true).catch(() => false);\n      const oldFileExists = await fs.access(oldMemoryPath).then(() => true).catch(() => false);\n\n      expect(newFileExists).toBe(true);\n      expect(oldFileExists).toBe(false);\n\n      // Verify console messages\n      expect(consoleErrorSpy).toHaveBeenCalledWith(\n        expect.stringContaining('DETECTED: Found legacy memory.json file')\n      );\n      expect(consoleErrorSpy).toHaveBeenCalledWith(\n        expect.stringContaining('COMPLETED: Successfully migrated')\n      );\n\n      consoleErrorSpy.mockRestore();\n    });\n\n    it('should use new file when both old and new files exist', async () => {\n      // Create both files\n      await fs.writeFile(oldMemoryPath, '{\"old\":\"data\"}');\n      await 
fs.writeFile(newMemoryPath, '{\"new\":\"data\"}');\n\n      const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});\n\n      const result = await ensureMemoryFilePath();\n\n      expect(result).toBe(defaultMemoryPath);\n\n      // Verify no migration happened (both files should still exist)\n      const newFileExists = await fs.access(newMemoryPath).then(() => true).catch(() => false);\n      const oldFileExists = await fs.access(oldMemoryPath).then(() => true).catch(() => false);\n\n      expect(newFileExists).toBe(true);\n      expect(oldFileExists).toBe(true);\n\n      // Verify no console messages about migration\n      expect(consoleErrorSpy).not.toHaveBeenCalled();\n\n      consoleErrorSpy.mockRestore();\n    });\n\n    it('should preserve file content during migration', async () => {\n      const testContent = '{\"entities\": [{\"name\": \"test\", \"type\": \"person\"}]}';\n      await fs.writeFile(oldMemoryPath, testContent);\n\n      await ensureMemoryFilePath();\n\n      const migratedContent = await fs.readFile(newMemoryPath, 'utf-8');\n      expect(migratedContent).toBe(testContent);\n    });\n  });\n\n  describe('defaultMemoryPath', () => {\n    it('should end with memory.jsonl', () => {\n      expect(defaultMemoryPath).toMatch(/memory\\.jsonl$/);\n    });\n\n    it('should be an absolute path', () => {\n      expect(path.isAbsolute(defaultMemoryPath)).toBe(true);\n    });\n  });\n});\n"
  },
  {
    "path": "src/memory/__tests__/knowledge-graph.test.ts",
    "content": "import { describe, it, expect, beforeEach, afterEach } from 'vitest';\nimport { promises as fs } from 'fs';\nimport path from 'path';\nimport { fileURLToPath } from 'url';\nimport { KnowledgeGraphManager, Entity, Relation, KnowledgeGraph } from '../index.js';\n\ndescribe('KnowledgeGraphManager', () => {\n  let manager: KnowledgeGraphManager;\n  let testFilePath: string;\n\n  beforeEach(async () => {\n    // Create a temporary test file path\n    testFilePath = path.join(\n      path.dirname(fileURLToPath(import.meta.url)),\n      `test-memory-${Date.now()}.jsonl`\n    );\n    manager = new KnowledgeGraphManager(testFilePath);\n  });\n\n  afterEach(async () => {\n    // Clean up test file\n    try {\n      await fs.unlink(testFilePath);\n    } catch (error) {\n      // Ignore errors if file doesn't exist\n    }\n  });\n\n  describe('createEntities', () => {\n    it('should create new entities', async () => {\n      const entities: Entity[] = [\n        { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp'] },\n        { name: 'Bob', entityType: 'person', observations: ['likes programming'] },\n      ];\n\n      const newEntities = await manager.createEntities(entities);\n      expect(newEntities).toHaveLength(2);\n      expect(newEntities).toEqual(entities);\n\n      const graph = await manager.readGraph();\n      expect(graph.entities).toHaveLength(2);\n    });\n\n    it('should not create duplicate entities', async () => {\n      const entities: Entity[] = [\n        { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp'] },\n      ];\n\n      await manager.createEntities(entities);\n      const newEntities = await manager.createEntities(entities);\n\n      expect(newEntities).toHaveLength(0);\n\n      const graph = await manager.readGraph();\n      expect(graph.entities).toHaveLength(1);\n    });\n\n    it('should handle empty entity arrays', async () => {\n      const newEntities = await 
manager.createEntities([]);\n      expect(newEntities).toHaveLength(0);\n    });\n  });\n\n  describe('createRelations', () => {\n    it('should create new relations', async () => {\n      await manager.createEntities([\n        { name: 'Alice', entityType: 'person', observations: [] },\n        { name: 'Bob', entityType: 'person', observations: [] },\n      ]);\n\n      const relations: Relation[] = [\n        { from: 'Alice', to: 'Bob', relationType: 'knows' },\n      ];\n\n      const newRelations = await manager.createRelations(relations);\n      expect(newRelations).toHaveLength(1);\n      expect(newRelations).toEqual(relations);\n\n      const graph = await manager.readGraph();\n      expect(graph.relations).toHaveLength(1);\n    });\n\n    it('should not create duplicate relations', async () => {\n      await manager.createEntities([\n        { name: 'Alice', entityType: 'person', observations: [] },\n        { name: 'Bob', entityType: 'person', observations: [] },\n      ]);\n\n      const relations: Relation[] = [\n        { from: 'Alice', to: 'Bob', relationType: 'knows' },\n      ];\n\n      await manager.createRelations(relations);\n      const newRelations = await manager.createRelations(relations);\n\n      expect(newRelations).toHaveLength(0);\n\n      const graph = await manager.readGraph();\n      expect(graph.relations).toHaveLength(1);\n    });\n\n    it('should handle empty relation arrays', async () => {\n      const newRelations = await manager.createRelations([]);\n      expect(newRelations).toHaveLength(0);\n    });\n  });\n\n  describe('addObservations', () => {\n    it('should add observations to existing entities', async () => {\n      await manager.createEntities([\n        { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp'] },\n      ]);\n\n      const results = await manager.addObservations([\n        { entityName: 'Alice', contents: ['likes coffee', 'has a dog'] },\n      ]);\n\n      
expect(results).toHaveLength(1);\n      expect(results[0].entityName).toBe('Alice');\n      expect(results[0].addedObservations).toHaveLength(2);\n\n      const graph = await manager.readGraph();\n      const alice = graph.entities.find(e => e.name === 'Alice');\n      expect(alice?.observations).toHaveLength(3);\n    });\n\n    it('should not add duplicate observations', async () => {\n      await manager.createEntities([\n        { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp'] },\n      ]);\n\n      await manager.addObservations([\n        { entityName: 'Alice', contents: ['likes coffee'] },\n      ]);\n\n      const results = await manager.addObservations([\n        { entityName: 'Alice', contents: ['likes coffee', 'has a dog'] },\n      ]);\n\n      expect(results[0].addedObservations).toHaveLength(1);\n      expect(results[0].addedObservations).toContain('has a dog');\n\n      const graph = await manager.readGraph();\n      const alice = graph.entities.find(e => e.name === 'Alice');\n      expect(alice?.observations).toHaveLength(3);\n    });\n\n    it('should throw error for non-existent entity', async () => {\n      await expect(\n        manager.addObservations([\n          { entityName: 'NonExistent', contents: ['some observation'] },\n        ])\n      ).rejects.toThrow('Entity with name NonExistent not found');\n    });\n  });\n\n  describe('deleteEntities', () => {\n    it('should delete entities', async () => {\n      await manager.createEntities([\n        { name: 'Alice', entityType: 'person', observations: [] },\n        { name: 'Bob', entityType: 'person', observations: [] },\n      ]);\n\n      await manager.deleteEntities(['Alice']);\n\n      const graph = await manager.readGraph();\n      expect(graph.entities).toHaveLength(1);\n      expect(graph.entities[0].name).toBe('Bob');\n    });\n\n    it('should cascade delete relations when deleting entities', async () => {\n      await manager.createEntities([\n        { 
name: 'Alice', entityType: 'person', observations: [] },\n        { name: 'Bob', entityType: 'person', observations: [] },\n        { name: 'Charlie', entityType: 'person', observations: [] },\n      ]);\n\n      await manager.createRelations([\n        { from: 'Alice', to: 'Bob', relationType: 'knows' },\n        { from: 'Bob', to: 'Charlie', relationType: 'knows' },\n      ]);\n\n      await manager.deleteEntities(['Bob']);\n\n      const graph = await manager.readGraph();\n      expect(graph.entities).toHaveLength(2);\n      expect(graph.relations).toHaveLength(0);\n    });\n\n    it('should handle deleting non-existent entities', async () => {\n      await manager.deleteEntities(['NonExistent']);\n      const graph = await manager.readGraph();\n      expect(graph.entities).toHaveLength(0);\n    });\n  });\n\n  describe('deleteObservations', () => {\n    it('should delete observations from entities', async () => {\n      await manager.createEntities([\n        { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp', 'likes coffee'] },\n      ]);\n\n      await manager.deleteObservations([\n        { entityName: 'Alice', observations: ['likes coffee'] },\n      ]);\n\n      const graph = await manager.readGraph();\n      const alice = graph.entities.find(e => e.name === 'Alice');\n      expect(alice?.observations).toHaveLength(1);\n      expect(alice?.observations).toContain('works at Acme Corp');\n    });\n\n    it('should handle deleting from non-existent entities', async () => {\n      await manager.deleteObservations([\n        { entityName: 'NonExistent', observations: ['some observation'] },\n      ]);\n      // Should not throw error\n      const graph = await manager.readGraph();\n      expect(graph.entities).toHaveLength(0);\n    });\n  });\n\n  describe('deleteRelations', () => {\n    it('should delete specific relations', async () => {\n      await manager.createEntities([\n        { name: 'Alice', entityType: 'person', observations: 
[] },\n        { name: 'Bob', entityType: 'person', observations: [] },\n      ]);\n\n      await manager.createRelations([\n        { from: 'Alice', to: 'Bob', relationType: 'knows' },\n        { from: 'Alice', to: 'Bob', relationType: 'works_with' },\n      ]);\n\n      await manager.deleteRelations([\n        { from: 'Alice', to: 'Bob', relationType: 'knows' },\n      ]);\n\n      const graph = await manager.readGraph();\n      expect(graph.relations).toHaveLength(1);\n      expect(graph.relations[0].relationType).toBe('works_with');\n    });\n  });\n\n  describe('readGraph', () => {\n    it('should return empty graph when file does not exist', async () => {\n      const graph = await manager.readGraph();\n      expect(graph.entities).toHaveLength(0);\n      expect(graph.relations).toHaveLength(0);\n    });\n\n    it('should return complete graph with entities and relations', async () => {\n      await manager.createEntities([\n        { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp'] },\n      ]);\n\n      await manager.createRelations([\n        { from: 'Alice', to: 'Alice', relationType: 'self' },\n      ]);\n\n      const graph = await manager.readGraph();\n      expect(graph.entities).toHaveLength(1);\n      expect(graph.relations).toHaveLength(1);\n    });\n  });\n\n  describe('searchNodes', () => {\n    beforeEach(async () => {\n      await manager.createEntities([\n        { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp', 'likes programming'] },\n        { name: 'Bob', entityType: 'person', observations: ['works at TechCo'] },\n        { name: 'Acme Corp', entityType: 'company', observations: ['tech company'] },\n      ]);\n\n      await manager.createRelations([\n        { from: 'Alice', to: 'Acme Corp', relationType: 'works_at' },\n        { from: 'Bob', to: 'Acme Corp', relationType: 'competitor' },\n      ]);\n    });\n\n    it('should search by entity name', async () => {\n      const result = 
await manager.searchNodes('Alice');\n      expect(result.entities).toHaveLength(1);\n      expect(result.entities[0].name).toBe('Alice');\n    });\n\n    it('should search by entity type', async () => {\n      const result = await manager.searchNodes('company');\n      expect(result.entities).toHaveLength(1);\n      expect(result.entities[0].name).toBe('Acme Corp');\n    });\n\n    it('should search by observation content', async () => {\n      const result = await manager.searchNodes('programming');\n      expect(result.entities).toHaveLength(1);\n      expect(result.entities[0].name).toBe('Alice');\n    });\n\n    it('should be case insensitive', async () => {\n      const result = await manager.searchNodes('ALICE');\n      expect(result.entities).toHaveLength(1);\n      expect(result.entities[0].name).toBe('Alice');\n    });\n\n    it('should include relations where at least one endpoint matches', async () => {\n      const result = await manager.searchNodes('Acme');\n      expect(result.entities).toHaveLength(2); // Alice and Acme Corp\n      // Both relations included: Alice → Acme Corp (Alice matched) and Bob → Acme Corp (Acme Corp matched)\n      expect(result.relations).toHaveLength(2);\n    });\n\n    it('should include outgoing relations to unmatched entities', async () => {\n      const result = await manager.searchNodes('Alice');\n      expect(result.entities).toHaveLength(1);\n      // Alice → Acme Corp relation included because Alice is the source\n      expect(result.relations).toHaveLength(1);\n      expect(result.relations[0].from).toBe('Alice');\n      expect(result.relations[0].to).toBe('Acme Corp');\n    });\n\n    it('should return empty graph for no matches', async () => {\n      const result = await manager.searchNodes('NonExistent');\n      expect(result.entities).toHaveLength(0);\n      expect(result.relations).toHaveLength(0);\n    });\n  });\n\n  describe('openNodes', () => {\n    beforeEach(async () => {\n      await 
manager.createEntities([\n        { name: 'Alice', entityType: 'person', observations: [] },\n        { name: 'Bob', entityType: 'person', observations: [] },\n        { name: 'Charlie', entityType: 'person', observations: [] },\n      ]);\n\n      await manager.createRelations([\n        { from: 'Alice', to: 'Bob', relationType: 'knows' },\n        { from: 'Bob', to: 'Charlie', relationType: 'knows' },\n      ]);\n    });\n\n    it('should open specific nodes by name', async () => {\n      const result = await manager.openNodes(['Alice', 'Bob']);\n      expect(result.entities).toHaveLength(2);\n      expect(result.entities.map(e => e.name)).toContain('Alice');\n      expect(result.entities.map(e => e.name)).toContain('Bob');\n    });\n\n    it('should include all relations connected to opened nodes', async () => {\n      const result = await manager.openNodes(['Alice', 'Bob']);\n      // Alice → Bob (both endpoints opened) and Bob → Charlie (Bob is opened)\n      expect(result.relations).toHaveLength(2);\n      expect(result.relations.some(r => r.from === 'Alice' && r.to === 'Bob')).toBe(true);\n      expect(result.relations.some(r => r.from === 'Bob' && r.to === 'Charlie')).toBe(true);\n    });\n\n    it('should include relations connected to opened nodes', async () => {\n      const result = await manager.openNodes(['Bob']);\n      // Bob has two relations: Alice → Bob and Bob → Charlie\n      expect(result.relations).toHaveLength(2);\n      expect(result.relations.some(r => r.from === 'Alice' && r.to === 'Bob')).toBe(true);\n      expect(result.relations.some(r => r.from === 'Bob' && r.to === 'Charlie')).toBe(true);\n    });\n\n    it('should include outgoing relations to nodes not in the open set', async () => {\n      // This is the core bug fix for #3137: open_nodes should return\n      // relations FROM the opened node, even if the target is not opened\n      const result = await manager.openNodes(['Alice']);\n      
expect(result.entities).toHaveLength(1);\n      expect(result.entities[0].name).toBe('Alice');\n      // Alice → Bob relation is included because Alice is opened\n      expect(result.relations).toHaveLength(1);\n      expect(result.relations[0].from).toBe('Alice');\n      expect(result.relations[0].to).toBe('Bob');\n    });\n\n    it('should include incoming relations from nodes not in the open set', async () => {\n      const result = await manager.openNodes(['Charlie']);\n      expect(result.entities).toHaveLength(1);\n      // Bob → Charlie relation is included because Charlie is opened\n      expect(result.relations).toHaveLength(1);\n      expect(result.relations[0].from).toBe('Bob');\n      expect(result.relations[0].to).toBe('Charlie');\n    });\n\n    it('should handle opening non-existent nodes', async () => {\n      const result = await manager.openNodes(['NonExistent']);\n      expect(result.entities).toHaveLength(0);\n    });\n\n    it('should handle empty node list', async () => {\n      const result = await manager.openNodes([]);\n      expect(result.entities).toHaveLength(0);\n      expect(result.relations).toHaveLength(0);\n    });\n  });\n\n  describe('file persistence', () => {\n    it('should persist data across manager instances', async () => {\n      await manager.createEntities([\n        { name: 'Alice', entityType: 'person', observations: ['persistent data'] },\n      ]);\n\n      // Create new manager instance with same file path\n      const manager2 = new KnowledgeGraphManager(testFilePath);\n      const graph = await manager2.readGraph();\n\n      expect(graph.entities).toHaveLength(1);\n      expect(graph.entities[0].name).toBe('Alice');\n    });\n\n    it('should handle JSONL format correctly', async () => {\n      await manager.createEntities([\n        { name: 'Alice', entityType: 'person', observations: [] },\n      ]);\n      await manager.createRelations([\n        { from: 'Alice', to: 'Alice', relationType: 'self' },\n      
]);\n\n      // Read file directly\n      const fileContent = await fs.readFile(testFilePath, 'utf-8');\n      const lines = fileContent.split('\\n').filter(line => line.trim());\n\n      expect(lines).toHaveLength(2);\n      expect(JSON.parse(lines[0])).toHaveProperty('type', 'entity');\n      expect(JSON.parse(lines[1])).toHaveProperty('type', 'relation');\n    });\n\n    it('should strip type field from entities when loading from file', async () => {\n      // Create entities and relations (these get saved with type field)\n      await manager.createEntities([\n        { name: 'Alice', entityType: 'person', observations: ['test observation'] },\n        { name: 'Bob', entityType: 'person', observations: [] },\n      ]);\n      await manager.createRelations([\n        { from: 'Alice', to: 'Bob', relationType: 'knows' },\n      ]);\n\n      // Verify file contains type field (order may vary)\n      const fileContent = await fs.readFile(testFilePath, 'utf-8');\n      const fileLines = fileContent.split('\\n').filter(line => line.trim());\n      const fileItems = fileLines.map(line => JSON.parse(line));\n      const fileEntity = fileItems.find(item => item.type === 'entity');\n      const fileRelation = fileItems.find(item => item.type === 'relation');\n      expect(fileEntity).toBeDefined();\n      expect(fileEntity).toHaveProperty('type', 'entity');\n      expect(fileRelation).toBeDefined();\n      expect(fileRelation).toHaveProperty('type', 'relation');\n\n      // Create new manager instance to force reload from file\n      const manager2 = new KnowledgeGraphManager(testFilePath);\n      const graph = await manager2.readGraph();\n\n      // Verify loaded entities don't have type field\n      expect(graph.entities).toHaveLength(2);\n      graph.entities.forEach(entity => {\n        expect(entity).not.toHaveProperty('type');\n        expect(entity).toHaveProperty('name');\n        expect(entity).toHaveProperty('entityType');\n        
expect(entity).toHaveProperty('observations');\n      });\n\n      // Verify loaded relations don't have type field\n      expect(graph.relations).toHaveLength(1);\n      graph.relations.forEach(relation => {\n        expect(relation).not.toHaveProperty('type');\n        expect(relation).toHaveProperty('from');\n        expect(relation).toHaveProperty('to');\n        expect(relation).toHaveProperty('relationType');\n      });\n    });\n\n    it('should strip type field from searchNodes results', async () => {\n      await manager.createEntities([\n        { name: 'Alice', entityType: 'person', observations: ['works at Acme'] },\n      ]);\n      await manager.createRelations([\n        { from: 'Alice', to: 'Alice', relationType: 'self' },\n      ]);\n\n      // Create new manager instance to force reload from file\n      const manager2 = new KnowledgeGraphManager(testFilePath);\n      const result = await manager2.searchNodes('Alice');\n\n      // Verify search results don't have type field\n      expect(result.entities).toHaveLength(1);\n      expect(result.entities[0]).not.toHaveProperty('type');\n      expect(result.entities[0].name).toBe('Alice');\n\n      expect(result.relations).toHaveLength(1);\n      expect(result.relations[0]).not.toHaveProperty('type');\n      expect(result.relations[0].from).toBe('Alice');\n    });\n\n    it('should strip type field from openNodes results', async () => {\n      await manager.createEntities([\n        { name: 'Alice', entityType: 'person', observations: [] },\n        { name: 'Bob', entityType: 'person', observations: [] },\n      ]);\n      await manager.createRelations([\n        { from: 'Alice', to: 'Bob', relationType: 'knows' },\n      ]);\n\n      // Create new manager instance to force reload from file\n      const manager2 = new KnowledgeGraphManager(testFilePath);\n      const result = await manager2.openNodes(['Alice', 'Bob']);\n\n      // Verify open results don't have type field\n      
expect(result.entities).toHaveLength(2);\n      result.entities.forEach(entity => {\n        expect(entity).not.toHaveProperty('type');\n      });\n\n      expect(result.relations).toHaveLength(1);\n      expect(result.relations[0]).not.toHaveProperty('type');\n    });\n  });\n});\n"
  },
  {
    "path": "src/memory/index.ts",
    "content": "#!/usr/bin/env node\n\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { StdioServerTransport } from \"@modelcontextprotocol/sdk/server/stdio.js\";\nimport { z } from \"zod\";\nimport { promises as fs } from 'fs';\nimport path from 'path';\nimport { fileURLToPath } from 'url';\n\n// Define memory file path using environment variable with fallback\nexport const defaultMemoryPath = path.join(path.dirname(fileURLToPath(import.meta.url)), 'memory.jsonl');\n\n// Handle backward compatibility: migrate memory.json to memory.jsonl if needed\nexport async function ensureMemoryFilePath(): Promise<string> {\n  if (process.env.MEMORY_FILE_PATH) {\n    // Custom path provided, use it as-is (with absolute path resolution)\n    return path.isAbsolute(process.env.MEMORY_FILE_PATH)\n      ? process.env.MEMORY_FILE_PATH\n      : path.join(path.dirname(fileURLToPath(import.meta.url)), process.env.MEMORY_FILE_PATH);\n  }\n  \n  // No custom path set, check for backward compatibility migration\n  const oldMemoryPath = path.join(path.dirname(fileURLToPath(import.meta.url)), 'memory.json');\n  const newMemoryPath = defaultMemoryPath;\n  \n  try {\n    // Check if old file exists and new file doesn't\n    await fs.access(oldMemoryPath);\n    try {\n      await fs.access(newMemoryPath);\n      // Both files exist, use new one (no migration needed)\n      return newMemoryPath;\n    } catch {\n      // Old file exists, new file doesn't - migrate\n      console.error('DETECTED: Found legacy memory.json file, migrating to memory.jsonl for JSONL format compatibility');\n      await fs.rename(oldMemoryPath, newMemoryPath);\n      console.error('COMPLETED: Successfully migrated memory.json to memory.jsonl');\n      return newMemoryPath;\n    }\n  } catch {\n    // Old file doesn't exist, use new path\n    return newMemoryPath;\n  }\n}\n\n// Initialize memory file path (will be set during startup)\nlet MEMORY_FILE_PATH: string;\n\n// We are storing 
our memory using entities, relations, and observations in a graph structure\nexport interface Entity {\n  name: string;\n  entityType: string;\n  observations: string[];\n}\n\nexport interface Relation {\n  from: string;\n  to: string;\n  relationType: string;\n}\n\nexport interface KnowledgeGraph {\n  entities: Entity[];\n  relations: Relation[];\n}\n\n// The KnowledgeGraphManager class contains all operations to interact with the knowledge graph\nexport class KnowledgeGraphManager {\n  constructor(private memoryFilePath: string) {}\n\n  private async loadGraph(): Promise<KnowledgeGraph> {\n    try {\n      const data = await fs.readFile(this.memoryFilePath, \"utf-8\");\n      const lines = data.split(\"\\n\").filter(line => line.trim() !== \"\");\n      return lines.reduce((graph: KnowledgeGraph, line) => {\n        const item = JSON.parse(line);\n        if (item.type === \"entity\") {\n          graph.entities.push({\n            name: item.name,\n            entityType: item.entityType,\n            observations: item.observations\n          });\n        }\n        if (item.type === \"relation\") {\n          graph.relations.push({\n            from: item.from,\n            to: item.to,\n            relationType: item.relationType\n          });\n        }\n        return graph;\n      }, { entities: [], relations: [] });\n    } catch (error) {\n      if (error instanceof Error && 'code' in error && (error as any).code === \"ENOENT\") {\n        return { entities: [], relations: [] };\n      }\n      throw error;\n    }\n  }\n\n  private async saveGraph(graph: KnowledgeGraph): Promise<void> {\n    const lines = [\n      ...graph.entities.map(e => JSON.stringify({\n        type: \"entity\",\n        name: e.name,\n        entityType: e.entityType,\n        observations: e.observations\n      })),\n      ...graph.relations.map(r => JSON.stringify({\n        type: \"relation\",\n        from: r.from,\n        to: r.to,\n        relationType: r.relationType\n      
})),\n    ];\n    await fs.writeFile(this.memoryFilePath, lines.join(\"\\n\"));\n  }\n\n  async createEntities(entities: Entity[]): Promise<Entity[]> {\n    const graph = await this.loadGraph();\n    const newEntities = entities.filter(e => !graph.entities.some(existingEntity => existingEntity.name === e.name));\n    graph.entities.push(...newEntities);\n    await this.saveGraph(graph);\n    return newEntities;\n  }\n\n  async createRelations(relations: Relation[]): Promise<Relation[]> {\n    const graph = await this.loadGraph();\n    const newRelations = relations.filter(r => !graph.relations.some(existingRelation => \n      existingRelation.from === r.from && \n      existingRelation.to === r.to && \n      existingRelation.relationType === r.relationType\n    ));\n    graph.relations.push(...newRelations);\n    await this.saveGraph(graph);\n    return newRelations;\n  }\n\n  async addObservations(observations: { entityName: string; contents: string[] }[]): Promise<{ entityName: string; addedObservations: string[] }[]> {\n    const graph = await this.loadGraph();\n    const results = observations.map(o => {\n      const entity = graph.entities.find(e => e.name === o.entityName);\n      if (!entity) {\n        throw new Error(`Entity with name ${o.entityName} not found`);\n      }\n      const newObservations = o.contents.filter(content => !entity.observations.includes(content));\n      entity.observations.push(...newObservations);\n      return { entityName: o.entityName, addedObservations: newObservations };\n    });\n    await this.saveGraph(graph);\n    return results;\n  }\n\n  async deleteEntities(entityNames: string[]): Promise<void> {\n    const graph = await this.loadGraph();\n    graph.entities = graph.entities.filter(e => !entityNames.includes(e.name));\n    graph.relations = graph.relations.filter(r => !entityNames.includes(r.from) && !entityNames.includes(r.to));\n    await this.saveGraph(graph);\n  }\n\n  async deleteObservations(deletions: { 
entityName: string; observations: string[] }[]): Promise<void> {\n    const graph = await this.loadGraph();\n    deletions.forEach(d => {\n      const entity = graph.entities.find(e => e.name === d.entityName);\n      if (entity) {\n        entity.observations = entity.observations.filter(o => !d.observations.includes(o));\n      }\n    });\n    await this.saveGraph(graph);\n  }\n\n  async deleteRelations(relations: Relation[]): Promise<void> {\n    const graph = await this.loadGraph();\n    graph.relations = graph.relations.filter(r => !relations.some(delRelation => \n      r.from === delRelation.from && \n      r.to === delRelation.to && \n      r.relationType === delRelation.relationType\n    ));\n    await this.saveGraph(graph);\n  }\n\n  async readGraph(): Promise<KnowledgeGraph> {\n    return this.loadGraph();\n  }\n\n  // Very basic search function\n  async searchNodes(query: string): Promise<KnowledgeGraph> {\n    const graph = await this.loadGraph();\n    \n    // Filter entities\n    const filteredEntities = graph.entities.filter(e => \n      e.name.toLowerCase().includes(query.toLowerCase()) ||\n      e.entityType.toLowerCase().includes(query.toLowerCase()) ||\n      e.observations.some(o => o.toLowerCase().includes(query.toLowerCase()))\n    );\n  \n    // Create a Set of filtered entity names for quick lookup\n    const filteredEntityNames = new Set(filteredEntities.map(e => e.name));\n  \n    // Include relations where at least one endpoint matches the search results.\n    // This lets callers discover connections to nodes outside the result set.\n    const filteredRelations = graph.relations.filter(r => \n      filteredEntityNames.has(r.from) || filteredEntityNames.has(r.to)\n    );\n  \n    const filteredGraph: KnowledgeGraph = {\n      entities: filteredEntities,\n      relations: filteredRelations,\n    };\n  \n    return filteredGraph;\n  }\n\n  async openNodes(names: string[]): Promise<KnowledgeGraph> {\n    const graph = await 
this.loadGraph();\n    \n    // Filter entities\n    const filteredEntities = graph.entities.filter(e => names.includes(e.name));\n  \n    // Create a Set of filtered entity names for quick lookup\n    const filteredEntityNames = new Set(filteredEntities.map(e => e.name));\n  \n    // Include relations where at least one endpoint is in the requested set.\n    // Previously this required BOTH endpoints, which meant relations from a\n    // requested node to an unrequested node were silently dropped — making it\n    // impossible to discover a node's connections without reading the full graph.\n    const filteredRelations = graph.relations.filter(r => \n      filteredEntityNames.has(r.from) || filteredEntityNames.has(r.to)\n    );\n  \n    const filteredGraph: KnowledgeGraph = {\n      entities: filteredEntities,\n      relations: filteredRelations,\n    };\n  \n    return filteredGraph;\n  }\n}\n\nlet knowledgeGraphManager: KnowledgeGraphManager;\n\n// Zod schemas for entities and relations\nconst EntitySchema = z.object({\n  name: z.string().describe(\"The name of the entity\"),\n  entityType: z.string().describe(\"The type of the entity\"),\n  observations: z.array(z.string()).describe(\"An array of observation contents associated with the entity\")\n});\n\nconst RelationSchema = z.object({\n  from: z.string().describe(\"The name of the entity where the relation starts\"),\n  to: z.string().describe(\"The name of the entity where the relation ends\"),\n  relationType: z.string().describe(\"The type of the relation\")\n});\n\n// The server instance and tools exposed to Claude\nconst server = new McpServer({\n  name: \"memory-server\",\n  version: \"0.6.3\",\n});\n\n// Register create_entities tool\nserver.registerTool(\n  \"create_entities\",\n  {\n    title: \"Create Entities\",\n    description: \"Create multiple new entities in the knowledge graph\",\n    inputSchema: {\n      entities: z.array(EntitySchema)\n    },\n    outputSchema: {\n      entities: 
z.array(EntitySchema)\n    }\n  },\n  async ({ entities }) => {\n    const result = await knowledgeGraphManager.createEntities(entities);\n    return {\n      content: [{ type: \"text\" as const, text: JSON.stringify(result, null, 2) }],\n      structuredContent: { entities: result }\n    };\n  }\n);\n\n// Register create_relations tool\nserver.registerTool(\n  \"create_relations\",\n  {\n    title: \"Create Relations\",\n    description: \"Create multiple new relations between entities in the knowledge graph. Relations should be in active voice\",\n    inputSchema: {\n      relations: z.array(RelationSchema)\n    },\n    outputSchema: {\n      relations: z.array(RelationSchema)\n    }\n  },\n  async ({ relations }) => {\n    const result = await knowledgeGraphManager.createRelations(relations);\n    return {\n      content: [{ type: \"text\" as const, text: JSON.stringify(result, null, 2) }],\n      structuredContent: { relations: result }\n    };\n  }\n);\n\n// Register add_observations tool\nserver.registerTool(\n  \"add_observations\",\n  {\n    title: \"Add Observations\",\n    description: \"Add new observations to existing entities in the knowledge graph\",\n    inputSchema: {\n      observations: z.array(z.object({\n        entityName: z.string().describe(\"The name of the entity to add the observations to\"),\n        contents: z.array(z.string()).describe(\"An array of observation contents to add\")\n      }))\n    },\n    outputSchema: {\n      results: z.array(z.object({\n        entityName: z.string(),\n        addedObservations: z.array(z.string())\n      }))\n    }\n  },\n  async ({ observations }) => {\n    const result = await knowledgeGraphManager.addObservations(observations);\n    return {\n      content: [{ type: \"text\" as const, text: JSON.stringify(result, null, 2) }],\n      structuredContent: { results: result }\n    };\n  }\n);\n\n// Register delete_entities tool\nserver.registerTool(\n  \"delete_entities\",\n  {\n    title: \"Delete 
Entities\",\n    description: \"Delete multiple entities and their associated relations from the knowledge graph\",\n    inputSchema: {\n      entityNames: z.array(z.string()).describe(\"An array of entity names to delete\")\n    },\n    outputSchema: {\n      success: z.boolean(),\n      message: z.string()\n    }\n  },\n  async ({ entityNames }) => {\n    await knowledgeGraphManager.deleteEntities(entityNames);\n    return {\n      content: [{ type: \"text\" as const, text: \"Entities deleted successfully\" }],\n      structuredContent: { success: true, message: \"Entities deleted successfully\" }\n    };\n  }\n);\n\n// Register delete_observations tool\nserver.registerTool(\n  \"delete_observations\",\n  {\n    title: \"Delete Observations\",\n    description: \"Delete specific observations from entities in the knowledge graph\",\n    inputSchema: {\n      deletions: z.array(z.object({\n        entityName: z.string().describe(\"The name of the entity containing the observations\"),\n        observations: z.array(z.string()).describe(\"An array of observations to delete\")\n      }))\n    },\n    outputSchema: {\n      success: z.boolean(),\n      message: z.string()\n    }\n  },\n  async ({ deletions }) => {\n    await knowledgeGraphManager.deleteObservations(deletions);\n    return {\n      content: [{ type: \"text\" as const, text: \"Observations deleted successfully\" }],\n      structuredContent: { success: true, message: \"Observations deleted successfully\" }\n    };\n  }\n);\n\n// Register delete_relations tool\nserver.registerTool(\n  \"delete_relations\",\n  {\n    title: \"Delete Relations\",\n    description: \"Delete multiple relations from the knowledge graph\",\n    inputSchema: {\n      relations: z.array(RelationSchema).describe(\"An array of relations to delete\")\n    },\n    outputSchema: {\n      success: z.boolean(),\n      message: z.string()\n    }\n  },\n  async ({ relations }) => {\n    await 
knowledgeGraphManager.deleteRelations(relations);\n    return {\n      content: [{ type: \"text\" as const, text: \"Relations deleted successfully\" }],\n      structuredContent: { success: true, message: \"Relations deleted successfully\" }\n    };\n  }\n);\n\n// Register read_graph tool\nserver.registerTool(\n  \"read_graph\",\n  {\n    title: \"Read Graph\",\n    description: \"Read the entire knowledge graph\",\n    inputSchema: {},\n    outputSchema: {\n      entities: z.array(EntitySchema),\n      relations: z.array(RelationSchema)\n    }\n  },\n  async () => {\n    const graph = await knowledgeGraphManager.readGraph();\n    return {\n      content: [{ type: \"text\" as const, text: JSON.stringify(graph, null, 2) }],\n      structuredContent: { ...graph }\n    };\n  }\n);\n\n// Register search_nodes tool\nserver.registerTool(\n  \"search_nodes\",\n  {\n    title: \"Search Nodes\",\n    description: \"Search for nodes in the knowledge graph based on a query\",\n    inputSchema: {\n      query: z.string().describe(\"The search query to match against entity names, types, and observation content\")\n    },\n    outputSchema: {\n      entities: z.array(EntitySchema),\n      relations: z.array(RelationSchema)\n    }\n  },\n  async ({ query }) => {\n    const graph = await knowledgeGraphManager.searchNodes(query);\n    return {\n      content: [{ type: \"text\" as const, text: JSON.stringify(graph, null, 2) }],\n      structuredContent: { ...graph }\n    };\n  }\n);\n\n// Register open_nodes tool\nserver.registerTool(\n  \"open_nodes\",\n  {\n    title: \"Open Nodes\",\n    description: \"Open specific nodes in the knowledge graph by their names\",\n    inputSchema: {\n      names: z.array(z.string()).describe(\"An array of entity names to retrieve\")\n    },\n    outputSchema: {\n      entities: z.array(EntitySchema),\n      relations: z.array(RelationSchema)\n    }\n  },\n  async ({ names }) => {\n    const graph = await knowledgeGraphManager.openNodes(names);\n   
 return {\n      content: [{ type: \"text\" as const, text: JSON.stringify(graph, null, 2) }],\n      structuredContent: { ...graph }\n    };\n  }\n);\n\nasync function main() {\n  // Initialize memory file path with backward compatibility\n  MEMORY_FILE_PATH = await ensureMemoryFilePath();\n\n  // Initialize knowledge graph manager with the memory file path\n  knowledgeGraphManager = new KnowledgeGraphManager(MEMORY_FILE_PATH);\n\n  const transport = new StdioServerTransport();\n  await server.connect(transport);\n  console.error(\"Knowledge Graph MCP Server running on stdio\");\n}\n\nmain().catch((error) => {\n  console.error(\"Fatal error in main():\", error);\n  process.exit(1);\n});\n"
  },
  {
    "path": "src/memory/package.json",
    "content": "{\n  \"name\": \"@modelcontextprotocol/server-memory\",\n  \"version\": \"0.6.3\",\n  \"description\": \"MCP server for enabling memory for Claude through a knowledge graph\",\n  \"license\": \"SEE LICENSE IN LICENSE\",\n  \"mcpName\": \"io.github.modelcontextprotocol/server-memory\",\n  \"author\": \"Model Context Protocol a Series of LF Projects, LLC.\",\n  \"homepage\": \"https://modelcontextprotocol.io\",\n  \"bugs\": \"https://github.com/modelcontextprotocol/servers/issues\",\n  \"repository\": {\n    \"type\": \"git\",\n    \"url\": \"https://github.com/modelcontextprotocol/servers.git\"\n  },\n  \"type\": \"module\",\n  \"bin\": {\n    \"mcp-server-memory\": \"dist/index.js\"\n  },\n  \"files\": [\n    \"dist\"\n  ],\n  \"scripts\": {\n    \"build\": \"tsc && shx chmod +x dist/*.js\",\n    \"prepare\": \"npm run build\",\n    \"watch\": \"tsc --watch\",\n    \"test\": \"vitest run --coverage\"\n  },\n  \"dependencies\": {\n    \"@modelcontextprotocol/sdk\": \"^1.26.0\"\n  },\n  \"devDependencies\": {\n    \"@types/node\": \"^22\",\n    \"@vitest/coverage-v8\": \"^2.1.8\",\n    \"shx\": \"^0.3.4\",\n    \"typescript\": \"^5.6.2\",\n    \"vitest\": \"^2.1.8\"\n  }\n}"
  },
  {
    "path": "src/memory/tsconfig.json",
    "content": "{\n  \"extends\": \"../../tsconfig.json\",\n  \"compilerOptions\": {\n    \"outDir\": \"./dist\",\n    \"rootDir\": \".\"\n  },\n  \"include\": [\n    \"./**/*.ts\"\n  ],\n  \"exclude\": [\n    \"**/*.test.ts\",\n    \"vitest.config.ts\"\n  ]\n}\n"
  },
  {
    "path": "src/memory/vitest.config.ts",
    "content": "import { defineConfig } from 'vitest/config';\n\nexport default defineConfig({\n  test: {\n    globals: true,\n    environment: 'node',\n    include: ['**/__tests__/**/*.test.ts'],\n    coverage: {\n      provider: 'v8',\n      include: ['**/*.ts'],\n      exclude: ['**/__tests__/**', '**/dist/**'],\n    },\n  },\n});\n"
  },
  {
    "path": "src/sequentialthinking/Dockerfile",
    "content": "FROM node:22.12-alpine AS builder\n\nCOPY src/sequentialthinking /app\nCOPY tsconfig.json /tsconfig.json\n\nWORKDIR /app\n\nRUN --mount=type=cache,target=/root/.npm npm install\n\nRUN --mount=type=cache,target=/root/.npm-production npm ci --ignore-scripts --omit-dev\n\nFROM node:22-alpine AS release\n\nCOPY --from=builder /app/dist /app/dist\nCOPY --from=builder /app/package.json /app/package.json\nCOPY --from=builder /app/package-lock.json /app/package-lock.json\n\nENV NODE_ENV=production\n\nWORKDIR /app\n\nRUN npm ci --ignore-scripts --omit-dev\n\nENTRYPOINT [\"node\", \"dist/index.js\"]\n"
  },
  {
    "path": "src/sequentialthinking/README.md",
    "content": "# Sequential Thinking MCP Server\n\nAn MCP server implementation that provides a tool for dynamic and reflective problem-solving through a structured thinking process.\n\n## Features\n\n- Break down complex problems into manageable steps\n- Revise and refine thoughts as understanding deepens\n- Branch into alternative paths of reasoning\n- Adjust the total number of thoughts dynamically\n- Generate and verify solution hypotheses\n\n## Tool\n\n### sequential_thinking\n\nFacilitates a detailed, step-by-step thinking process for problem-solving and analysis.\n\n**Inputs:**\n- `thought` (string): The current thinking step\n- `nextThoughtNeeded` (boolean): Whether another thought step is needed\n- `thoughtNumber` (integer): Current thought number\n- `totalThoughts` (integer): Estimated total thoughts needed\n- `isRevision` (boolean, optional): Whether this revises previous thinking\n- `revisesThought` (integer, optional): Which thought is being reconsidered\n- `branchFromThought` (integer, optional): Branching point thought number\n- `branchId` (string, optional): Branch identifier\n- `needsMoreThoughts` (boolean, optional): If more thoughts are needed\n\n## Usage\n\nThe Sequential Thinking tool is designed for:\n- Breaking down complex problems into steps\n- Planning and design with room for revision\n- Analysis that might need course correction\n- Problems where the full scope might not be clear initially\n- Tasks that need to maintain context over multiple steps\n- Situations where irrelevant information needs to be filtered out\n\n## Configuration\n\n### Usage with Claude Desktop\n\nAdd this to your `claude_desktop_config.json`:\n\n#### npx\n\n```json\n{\n  \"mcpServers\": {\n    \"sequential-thinking\": {\n      \"command\": \"npx\",\n      \"args\": [\n        \"-y\",\n        \"@modelcontextprotocol/server-sequential-thinking\"\n      ]\n    }\n  }\n}\n```\n\n#### docker\n\n```json\n{\n  \"mcpServers\": {\n    \"sequentialthinking\": {\n      
\"command\": \"docker\",\n      \"args\": [\n        \"run\",\n        \"--rm\",\n        \"-i\",\n        \"mcp/sequentialthinking\"\n      ]\n    }\n  }\n}\n```\n\nTo disable logging of thought information set env var: `DISABLE_THOUGHT_LOGGING` to `true`.\nComment\n\n### Usage with VS Code\n\nFor quick installation, click one of the installation buttons below...\n\n[![Install with NPX in VS Code](https://img.shields.io/badge/VS_Code-NPM-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=sequentialthinking&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40modelcontextprotocol%2Fserver-sequential-thinking%22%5D%7D) [![Install with NPX in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-NPM-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=sequentialthinking&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40modelcontextprotocol%2Fserver-sequential-thinking%22%5D%7D&quality=insiders)\n\n[![Install with Docker in VS Code](https://img.shields.io/badge/VS_Code-Docker-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=sequentialthinking&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22--rm%22%2C%22-i%22%2C%22mcp%2Fsequentialthinking%22%5D%7D) [![Install with Docker in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Docker-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=sequentialthinking&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22--rm%22%2C%22-i%22%2C%22mcp%2Fsequentialthinking%22%5D%7D&quality=insiders)\n\nFor manual installation, you can configure the MCP server using one of these methods:\n\n**Method 1: User Configuration (Recommended)**\nAdd the configuration to your 
user-level MCP configuration file. Open the Command Palette (`Ctrl + Shift + P`) and run `MCP: Open User Configuration`. This will open your user `mcp.json` file where you can add the server configuration.\n\n**Method 2: Workspace Configuration**\nAlternatively, you can add the configuration to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others.\n\n> For more details about MCP configuration in VS Code, see the [official VS Code MCP documentation](https://code.visualstudio.com/docs/copilot/customization/mcp-servers).\n\nFor NPX installation:\n\n```json\n{\n  \"servers\": {\n    \"sequential-thinking\": {\n      \"command\": \"npx\",\n      \"args\": [\n        \"-y\",\n        \"@modelcontextprotocol/server-sequential-thinking\"\n      ]\n    }\n  }\n}\n```\n\nFor Docker installation:\n\n```json\n{\n  \"servers\": {\n    \"sequential-thinking\": {\n      \"command\": \"docker\",\n      \"args\": [\n        \"run\",\n        \"--rm\",\n        \"-i\",\n        \"mcp/sequentialthinking\"\n      ]\n    }\n  }\n}\n```\n\n### Usage with Codex CLI\n\nRun the following:\n\n#### npx\n\n```bash\ncodex mcp add sequential-thinking npx -y @modelcontextprotocol/server-sequential-thinking\n```\n\n## Building\n\nDocker:\n\n```bash\ndocker build -t mcp/sequentialthinking -f src/sequentialthinking/Dockerfile .\n```\n\n## License\n\nThis MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository.\n"
  },
  {
    "path": "src/sequentialthinking/__tests__/lib.test.ts",
    "content": "import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';\nimport { SequentialThinkingServer, ThoughtData } from '../lib.js';\n\n// Mock chalk to avoid ESM issues\nvi.mock('chalk', () => {\n  const chalkMock = {\n    yellow: (str: string) => str,\n    green: (str: string) => str,\n    blue: (str: string) => str,\n  };\n  return {\n    default: chalkMock,\n  };\n});\n\ndescribe('SequentialThinkingServer', () => {\n  let server: SequentialThinkingServer;\n\n  beforeEach(() => {\n    // Disable thought logging for tests\n    process.env.DISABLE_THOUGHT_LOGGING = 'true';\n    server = new SequentialThinkingServer();\n  });\n\n  // Note: Input validation tests removed - validation now happens at the tool\n  // registration layer via Zod schemas before processThought is called\n\n  describe('processThought - valid inputs', () => {\n    it('should accept valid basic thought', () => {\n      const input = {\n        thought: 'This is my first thought',\n        thoughtNumber: 1,\n        totalThoughts: 3,\n        nextThoughtNeeded: true\n      };\n\n      const result = server.processThought(input);\n      expect(result.isError).toBeUndefined();\n\n      const data = JSON.parse(result.content[0].text);\n      expect(data.thoughtNumber).toBe(1);\n      expect(data.totalThoughts).toBe(3);\n      expect(data.nextThoughtNeeded).toBe(true);\n      expect(data.thoughtHistoryLength).toBe(1);\n    });\n\n    it('should accept thought with optional fields', () => {\n      const input = {\n        thought: 'Revising my earlier idea',\n        thoughtNumber: 2,\n        totalThoughts: 3,\n        nextThoughtNeeded: true,\n        isRevision: true,\n        revisesThought: 1,\n        needsMoreThoughts: false\n      };\n\n      const result = server.processThought(input);\n      expect(result.isError).toBeUndefined();\n\n      const data = JSON.parse(result.content[0].text);\n      expect(data.thoughtNumber).toBe(2);\n      
expect(data.thoughtHistoryLength).toBe(1);\n    });\n\n    it('should track multiple thoughts in history', () => {\n      const input1 = {\n        thought: 'First thought',\n        thoughtNumber: 1,\n        totalThoughts: 3,\n        nextThoughtNeeded: true\n      };\n\n      const input2 = {\n        thought: 'Second thought',\n        thoughtNumber: 2,\n        totalThoughts: 3,\n        nextThoughtNeeded: true\n      };\n\n      const input3 = {\n        thought: 'Final thought',\n        thoughtNumber: 3,\n        totalThoughts: 3,\n        nextThoughtNeeded: false\n      };\n\n      server.processThought(input1);\n      server.processThought(input2);\n      const result = server.processThought(input3);\n\n      const data = JSON.parse(result.content[0].text);\n      expect(data.thoughtHistoryLength).toBe(3);\n      expect(data.nextThoughtNeeded).toBe(false);\n    });\n\n    it('should auto-adjust totalThoughts if thoughtNumber exceeds it', () => {\n      const input = {\n        thought: 'Thought 5',\n        thoughtNumber: 5,\n        totalThoughts: 3,\n        nextThoughtNeeded: true\n      };\n\n      const result = server.processThought(input);\n      const data = JSON.parse(result.content[0].text);\n\n      expect(data.totalThoughts).toBe(5);\n    });\n  });\n\n  describe('processThought - branching', () => {\n    it('should track branches correctly', () => {\n      const input1 = {\n        thought: 'Main thought',\n        thoughtNumber: 1,\n        totalThoughts: 3,\n        nextThoughtNeeded: true\n      };\n\n      const input2 = {\n        thought: 'Branch A thought',\n        thoughtNumber: 2,\n        totalThoughts: 3,\n        nextThoughtNeeded: true,\n        branchFromThought: 1,\n        branchId: 'branch-a'\n      };\n\n      const input3 = {\n        thought: 'Branch B thought',\n        thoughtNumber: 2,\n        totalThoughts: 3,\n        nextThoughtNeeded: false,\n        branchFromThought: 1,\n        branchId: 'branch-b'\n      
};\n\n      server.processThought(input1);\n      server.processThought(input2);\n      const result = server.processThought(input3);\n\n      const data = JSON.parse(result.content[0].text);\n      expect(data.branches).toContain('branch-a');\n      expect(data.branches).toContain('branch-b');\n      expect(data.branches.length).toBe(2);\n      expect(data.thoughtHistoryLength).toBe(3);\n    });\n\n    it('should allow multiple thoughts in same branch', () => {\n      const input1 = {\n        thought: 'Branch thought 1',\n        thoughtNumber: 1,\n        totalThoughts: 2,\n        nextThoughtNeeded: true,\n        branchFromThought: 1,\n        branchId: 'branch-a'\n      };\n\n      const input2 = {\n        thought: 'Branch thought 2',\n        thoughtNumber: 2,\n        totalThoughts: 2,\n        nextThoughtNeeded: false,\n        branchFromThought: 1,\n        branchId: 'branch-a'\n      };\n\n      server.processThought(input1);\n      const result = server.processThought(input2);\n\n      const data = JSON.parse(result.content[0].text);\n      expect(data.branches).toContain('branch-a');\n      expect(data.branches.length).toBe(1);\n    });\n  });\n\n  describe('processThought - edge cases', () => {\n    it('should handle very long thought strings', () => {\n      const input = {\n        thought: 'a'.repeat(10000),\n        thoughtNumber: 1,\n        totalThoughts: 1,\n        nextThoughtNeeded: false\n      };\n\n      const result = server.processThought(input);\n      expect(result.isError).toBeUndefined();\n    });\n\n    it('should handle thoughtNumber = 1, totalThoughts = 1', () => {\n      const input = {\n        thought: 'Only thought',\n        thoughtNumber: 1,\n        totalThoughts: 1,\n        nextThoughtNeeded: false\n      };\n\n      const result = server.processThought(input);\n      expect(result.isError).toBeUndefined();\n\n      const data = JSON.parse(result.content[0].text);\n      expect(data.thoughtNumber).toBe(1);\n      
expect(data.totalThoughts).toBe(1);\n    });\n\n    it('should handle nextThoughtNeeded = false', () => {\n      const input = {\n        thought: 'Final thought',\n        thoughtNumber: 3,\n        totalThoughts: 3,\n        nextThoughtNeeded: false\n      };\n\n      const result = server.processThought(input);\n      const data = JSON.parse(result.content[0].text);\n\n      expect(data.nextThoughtNeeded).toBe(false);\n    });\n  });\n\n  describe('processThought - response format', () => {\n    it('should return correct response structure on success', () => {\n      const input = {\n        thought: 'Test thought',\n        thoughtNumber: 1,\n        totalThoughts: 1,\n        nextThoughtNeeded: false\n      };\n\n      const result = server.processThought(input);\n\n      expect(result).toHaveProperty('content');\n      expect(Array.isArray(result.content)).toBe(true);\n      expect(result.content.length).toBe(1);\n      expect(result.content[0]).toHaveProperty('type', 'text');\n      expect(result.content[0]).toHaveProperty('text');\n    });\n\n    it('should return valid JSON in response', () => {\n      const input = {\n        thought: 'Test thought',\n        thoughtNumber: 1,\n        totalThoughts: 1,\n        nextThoughtNeeded: false\n      };\n\n      const result = server.processThought(input);\n\n      expect(() => JSON.parse(result.content[0].text)).not.toThrow();\n    });\n  });\n\n  describe('processThought - with logging enabled', () => {\n    let serverWithLogging: SequentialThinkingServer;\n\n    beforeEach(() => {\n      // Enable thought logging for these tests\n      delete process.env.DISABLE_THOUGHT_LOGGING;\n      serverWithLogging = new SequentialThinkingServer();\n    });\n\n    afterEach(() => {\n      // Reset to disabled for other tests\n      process.env.DISABLE_THOUGHT_LOGGING = 'true';\n    });\n\n    it('should format and log regular thoughts', () => {\n      const input = {\n        thought: 'Test thought with logging',\n       
 thoughtNumber: 1,\n        totalThoughts: 3,\n        nextThoughtNeeded: true\n      };\n\n      const result = serverWithLogging.processThought(input);\n      expect(result.isError).toBeUndefined();\n    });\n\n    it('should format and log revision thoughts', () => {\n      const input = {\n        thought: 'Revised thought',\n        thoughtNumber: 2,\n        totalThoughts: 3,\n        nextThoughtNeeded: true,\n        isRevision: true,\n        revisesThought: 1\n      };\n\n      const result = serverWithLogging.processThought(input);\n      expect(result.isError).toBeUndefined();\n    });\n\n    it('should format and log branch thoughts', () => {\n      const input = {\n        thought: 'Branch thought',\n        thoughtNumber: 2,\n        totalThoughts: 3,\n        nextThoughtNeeded: false,\n        branchFromThought: 1,\n        branchId: 'branch-a'\n      };\n\n      const result = serverWithLogging.processThought(input);\n      expect(result.isError).toBeUndefined();\n    });\n  });\n});\n"
  },
  {
    "path": "src/sequentialthinking/index.ts",
    "content": "#!/usr/bin/env node\n\nimport { McpServer } from \"@modelcontextprotocol/sdk/server/mcp.js\";\nimport { StdioServerTransport } from \"@modelcontextprotocol/sdk/server/stdio.js\";\nimport { z } from \"zod\";\nimport { SequentialThinkingServer } from './lib.js';\n\n/** Safe boolean coercion that correctly handles string \"false\" */\nconst coercedBoolean = z.preprocess((val) => {\n  if (typeof val === \"boolean\") return val;\n  if (typeof val === \"string\") {\n    if (val.toLowerCase() === \"true\") return true;\n    if (val.toLowerCase() === \"false\") return false;\n  }\n  return val;\n}, z.boolean());\n\nconst server = new McpServer({\n  name: \"sequential-thinking-server\",\n  version: \"0.2.0\",\n});\n\nconst thinkingServer = new SequentialThinkingServer();\n\nserver.registerTool(\n  \"sequentialthinking\",\n  {\n    title: \"Sequential Thinking\",\n    description: `A detailed tool for dynamic and reflective problem-solving through thoughts.\nThis tool helps analyze problems through a flexible thinking process that can adapt and evolve.\nEach thought can build on, question, or revise previous insights as understanding deepens.\n\nWhen to use this tool:\n- Breaking down complex problems into steps\n- Planning and design with room for revision\n- Analysis that might need course correction\n- Problems where the full scope might not be clear initially\n- Problems that require a multi-step solution\n- Tasks that need to maintain context over multiple steps\n- Situations where irrelevant information needs to be filtered out\n\nKey features:\n- You can adjust total_thoughts up or down as you progress\n- You can question or revise previous thoughts\n- You can add more thoughts even after reaching what seemed like the end\n- You can express uncertainty and explore alternative approaches\n- Not every thought needs to build linearly - you can branch or backtrack\n- Generates a solution hypothesis\n- Verifies the hypothesis based on the Chain of Thought 
steps\n- Repeats the process until satisfied\n- Provides a correct answer\n\nParameters explained:\n- thought: Your current thinking step, which can include:\n  * Regular analytical steps\n  * Revisions of previous thoughts\n  * Questions about previous decisions\n  * Realizations about needing more analysis\n  * Changes in approach\n  * Hypothesis generation\n  * Hypothesis verification\n- nextThoughtNeeded: True if you need more thinking, even if at what seemed like the end\n- thoughtNumber: Current number in sequence (can go beyond initial total if needed)\n- totalThoughts: Current estimate of thoughts needed (can be adjusted up/down)\n- isRevision: A boolean indicating if this thought revises previous thinking\n- revisesThought: If is_revision is true, which thought number is being reconsidered\n- branchFromThought: If branching, which thought number is the branching point\n- branchId: Identifier for the current branch (if any)\n- needsMoreThoughts: If reaching end but realizing more thoughts needed\n\nYou should:\n1. Start with an initial estimate of needed thoughts, but be ready to adjust\n2. Feel free to question or revise previous thoughts\n3. Don't hesitate to add more thoughts if needed, even at the \"end\"\n4. Express uncertainty when present\n5. Mark thoughts that revise previous thinking or branch into new paths\n6. Ignore information that is irrelevant to the current step\n7. Generate a solution hypothesis when appropriate\n8. Verify the hypothesis based on the Chain of Thought steps\n9. Repeat the process until satisfied with the solution\n10. Provide a single, ideally correct answer as the final output\n11. 
Only set nextThoughtNeeded to false when truly done and a satisfactory answer is reached`,\n    inputSchema: {\n      thought: z.string().describe(\"Your current thinking step\"),\n      nextThoughtNeeded: coercedBoolean.describe(\"Whether another thought step is needed\"),\n      thoughtNumber: z.coerce.number().int().min(1).describe(\"Current thought number (numeric value, e.g., 1, 2, 3)\"),\n      totalThoughts: z.coerce.number().int().min(1).describe(\"Estimated total thoughts needed (numeric value, e.g., 5, 10)\"),\n      isRevision: coercedBoolean.optional().describe(\"Whether this revises previous thinking\"),\n      revisesThought: z.coerce.number().int().min(1).optional().describe(\"Which thought is being reconsidered\"),\n      branchFromThought: z.coerce.number().int().min(1).optional().describe(\"Branching point thought number\"),\n      branchId: z.string().optional().describe(\"Branch identifier\"),\n      needsMoreThoughts: coercedBoolean.optional().describe(\"If more thoughts are needed\")\n    },\n    annotations: {\n      readOnlyHint: true,\n      destructiveHint: false,\n      idempotentHint: true,\n      openWorldHint: false,\n    },\n    outputSchema: {\n      thoughtNumber: z.number(),\n      totalThoughts: z.number(),\n      nextThoughtNeeded: z.boolean(),\n      branches: z.array(z.string()),\n      thoughtHistoryLength: z.number()\n    },\n  },\n  async (args) => {\n    const result = thinkingServer.processThought(args);\n\n    if (result.isError) {\n      return result;\n    }\n\n    // Parse the JSON response to get structured content\n    const parsedContent = JSON.parse(result.content[0].text);\n\n    return {\n      content: result.content,\n      structuredContent: parsedContent\n    };\n  }\n);\n\nasync function runServer() {\n  const transport = new StdioServerTransport();\n  await server.connect(transport);\n  console.error(\"Sequential Thinking MCP Server running on stdio\");\n}\n\nrunServer().catch((error) => {\n  
console.error(\"Fatal error running server:\", error);\n  process.exit(1);\n});\n"
  },
  {
    "path": "src/sequentialthinking/lib.ts",
    "content": "import chalk from 'chalk';\n\nexport interface ThoughtData {\n  thought: string;\n  thoughtNumber: number;\n  totalThoughts: number;\n  isRevision?: boolean;\n  revisesThought?: number;\n  branchFromThought?: number;\n  branchId?: string;\n  needsMoreThoughts?: boolean;\n  nextThoughtNeeded: boolean;\n}\n\nexport class SequentialThinkingServer {\n  private thoughtHistory: ThoughtData[] = [];\n  private branches: Record<string, ThoughtData[]> = {};\n  private disableThoughtLogging: boolean;\n\n  constructor() {\n    this.disableThoughtLogging = (process.env.DISABLE_THOUGHT_LOGGING || \"\").toLowerCase() === \"true\";\n  }\n\n  private formatThought(thoughtData: ThoughtData): string {\n    const { thoughtNumber, totalThoughts, thought, isRevision, revisesThought, branchFromThought, branchId } = thoughtData;\n\n    let prefix = '';\n    let context = '';\n\n    if (isRevision) {\n      prefix = chalk.yellow('🔄 Revision');\n      context = ` (revising thought ${revisesThought})`;\n    } else if (branchFromThought) {\n      prefix = chalk.green('🌿 Branch');\n      context = ` (from thought ${branchFromThought}, ID: ${branchId})`;\n    } else {\n      prefix = chalk.blue('💭 Thought');\n      context = '';\n    }\n\n    const header = `${prefix} ${thoughtNumber}/${totalThoughts}${context}`;\n    const border = '─'.repeat(Math.max(header.length, thought.length) + 4);\n\n    return `\n┌${border}┐\n│ ${header} │\n├${border}┤\n│ ${thought.padEnd(border.length - 2)} │\n└${border}┘`;\n  }\n\n  public processThought(input: ThoughtData): { content: Array<{ type: \"text\"; text: string }>; isError?: boolean } {\n    try {\n      // Validation happens at the tool registration layer via Zod\n      // Adjust totalThoughts if thoughtNumber exceeds it\n      if (input.thoughtNumber > input.totalThoughts) {\n        input.totalThoughts = input.thoughtNumber;\n      }\n\n      this.thoughtHistory.push(input);\n\n      if (input.branchFromThought && input.branchId) {\n     
   if (!this.branches[input.branchId]) {\n          this.branches[input.branchId] = [];\n        }\n        this.branches[input.branchId].push(input);\n      }\n\n      if (!this.disableThoughtLogging) {\n        const formattedThought = this.formatThought(input);\n        console.error(formattedThought);\n      }\n\n      return {\n        content: [{\n          type: \"text\" as const,\n          text: JSON.stringify({\n            thoughtNumber: input.thoughtNumber,\n            totalThoughts: input.totalThoughts,\n            nextThoughtNeeded: input.nextThoughtNeeded,\n            branches: Object.keys(this.branches),\n            thoughtHistoryLength: this.thoughtHistory.length\n          }, null, 2)\n        }]\n      };\n    } catch (error) {\n      return {\n        content: [{\n          type: \"text\" as const,\n          text: JSON.stringify({\n            error: error instanceof Error ? error.message : String(error),\n            status: 'failed'\n          }, null, 2)\n        }],\n        isError: true\n      };\n    }\n  }\n}\n"
  },
  {
    "path": "src/sequentialthinking/package.json",
    "content": "{\n  \"name\": \"@modelcontextprotocol/server-sequential-thinking\",\n  \"version\": \"0.6.2\",\n  \"description\": \"MCP server for sequential thinking and problem solving\",\n  \"license\": \"SEE LICENSE IN LICENSE\",\n  \"mcpName\": \"io.github.modelcontextprotocol/server-sequential-thinking\",\n  \"author\": \"Model Context Protocol a Series of LF Projects, LLC.\",\n  \"homepage\": \"https://modelcontextprotocol.io\",\n  \"bugs\": \"https://github.com/modelcontextprotocol/servers/issues\",\n  \"repository\": {\n    \"type\": \"git\",\n    \"url\": \"https://github.com/modelcontextprotocol/servers.git\"\n  },\n  \"type\": \"module\",\n  \"bin\": {\n    \"mcp-server-sequential-thinking\": \"dist/index.js\"\n  },\n  \"files\": [\n    \"dist\"\n  ],\n  \"scripts\": {\n    \"build\": \"tsc && shx chmod +x dist/*.js\",\n    \"prepare\": \"npm run build\",\n    \"watch\": \"tsc --watch\",\n    \"test\": \"vitest run --coverage\"\n  },\n  \"dependencies\": {\n    \"@modelcontextprotocol/sdk\": \"^1.26.0\",\n    \"chalk\": \"^5.3.0\",\n    \"yargs\": \"^17.7.2\"\n  },\n  \"devDependencies\": {\n    \"@types/node\": \"^22\",\n    \"@types/yargs\": \"^17.0.32\",\n    \"@vitest/coverage-v8\": \"^2.1.8\",\n    \"shx\": \"^0.3.4\",\n    \"typescript\": \"^5.3.3\",\n    \"vitest\": \"^2.1.8\"\n  }\n}"
  },
  {
    "path": "src/sequentialthinking/tsconfig.json",
    "content": "{\n  \"extends\": \"../../tsconfig.json\",\n  \"compilerOptions\": {\n    \"outDir\": \"./dist\",\n    \"rootDir\": \".\"\n  },\n  \"include\": [\n    \"./**/*.ts\"\n  ],\n  \"exclude\": [\n    \"**/*.test.ts\",\n    \"vitest.config.ts\"\n  ]\n}\n"
  },
  {
    "path": "src/sequentialthinking/vitest.config.ts",
    "content": "import { defineConfig } from 'vitest/config';\n\nexport default defineConfig({\n  test: {\n    globals: true,\n    environment: 'node',\n    include: ['**/__tests__/**/*.test.ts'],\n    coverage: {\n      provider: 'v8',\n      include: ['**/*.ts'],\n      exclude: ['**/__tests__/**', '**/dist/**'],\n    },\n  },\n});\n"
  },
  {
    "path": "src/time/.python-version",
    "content": "3.10\n"
  },
  {
    "path": "src/time/Dockerfile",
    "content": "# Use a Python image with uv pre-installed\nFROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim AS uv\n\n# Install the project into `/app`\nWORKDIR /app\n\n# Enable bytecode compilation\nENV UV_COMPILE_BYTECODE=1\n\n# Copy from the cache instead of linking since it's a mounted volume\nENV UV_LINK_MODE=copy\n\n# Install the project's dependencies using the lockfile and settings\nRUN --mount=type=cache,target=/root/.cache/uv \\\n    --mount=type=bind,source=uv.lock,target=uv.lock \\\n    --mount=type=bind,source=pyproject.toml,target=pyproject.toml \\\n    uv sync --locked --no-install-project --no-dev --no-editable\n\n# Then, add the rest of the project source code and install it\n# Installing separately from its dependencies allows optimal layer caching\nADD . /app\nRUN --mount=type=cache,target=/root/.cache/uv \\\n    uv sync --locked --no-dev --no-editable\n\nFROM python:3.12-slim-bookworm\n\nWORKDIR /app\n \nCOPY --from=uv /root/.local /root/.local\nCOPY --from=uv --chown=app:app /app/.venv /app/.venv\n\n# Place executables in the environment at the front of the path\nENV PATH=\"/app/.venv/bin:$PATH\"\n\n# Set the LOCAL_TIMEZONE environment variable\nENV LOCAL_TIMEZONE=${LOCAL_TIMEZONE:-\"UTC\"}\n\n# when running the container, add --local-timezone and a bind mount to the host's db file\nENTRYPOINT [\"mcp-server-time\", \"--local-timezone\", \"${LOCAL_TIMEZONE}\"]\n"
  },
  {
    "path": "src/time/README.md",
    "content": "# Time MCP Server\n\n<!-- mcp-name: io.github.modelcontextprotocol/server-time -->\n\nA Model Context Protocol server that provides time and timezone conversion capabilities. This server enables LLMs to get current time information and perform timezone conversions using IANA timezone names, with automatic system timezone detection.\n\n### Available Tools\n\n- `get_current_time` - Get current time in a specific timezone or system timezone.\n  - Required arguments:\n    - `timezone` (string): IANA timezone name (e.g., 'America/New_York', 'Europe/London')\n\n- `convert_time` - Convert time between timezones.\n  - Required arguments:\n    - `source_timezone` (string): Source IANA timezone name\n    - `time` (string): Time in 24-hour format (HH:MM)\n    - `target_timezone` (string): Target IANA timezone name\n\n## Installation\n\n### Using uv (recommended)\n\nWhen using [`uv`](https://docs.astral.sh/uv/) no specific installation is needed. We will\nuse [`uvx`](https://docs.astral.sh/uv/guides/tools/) to directly run *mcp-server-time*.\n\n```bash\nuvx mcp-server-time\n```\n\n### Using PIP\n\nAlternatively you can install `mcp-server-time` via pip:\n\n```bash\npip install mcp-server-time\n```\n\nAfter installation, you can run it as a script using:\n\n```bash\npython -m mcp_server_time\n```\n\n## Configuration\n\n### Configure for Claude.app\n\nAdd to your Claude settings:\n\n<details>\n<summary>Using uvx</summary>\n\n```json\n{\n  \"mcpServers\": {\n    \"time\": {\n      \"command\": \"uvx\",\n      \"args\": [\"mcp-server-time\"]\n    }\n  }\n}\n```\n</details>\n\n<details>\n<summary>Using docker</summary>\n\n```json\n{\n  \"mcpServers\": {\n    \"time\": {\n      \"command\": \"docker\",\n      \"args\": [\"run\", \"-i\", \"--rm\", \"-e\", \"LOCAL_TIMEZONE\", \"mcp/time\"]\n    }\n  }\n}\n```\n</details>\n\n<details>\n<summary>Using pip installation</summary>\n\n```json\n{\n  \"mcpServers\": {\n    \"time\": {\n      \"command\": \"python\",\n      
\"args\": [\"-m\", \"mcp_server_time\"]\n    }\n  }\n}\n```\n</details>\n\n### Configure for Zed\n\nAdd to your Zed settings.json:\n\n<details>\n<summary>Using uvx</summary>\n\n```json\n\"context_servers\": {\n  \"mcp-server-time\": {\n    \"command\": \"uvx\",\n    \"args\": [\"mcp-server-time\"]\n  }\n},\n```\n</details>\n\n<details>\n<summary>Using pip installation</summary>\n\n```json\n\"context_servers\": {\n  \"mcp-server-time\": {\n    \"command\": \"python\",\n    \"args\": [\"-m\", \"mcp_server_time\"]\n  }\n},\n```\n</details>\n\n### Configure for VS Code\n\nFor quick installation, use one of the one-click install buttons below...\n\n[![Install with UV in VS Code](https://img.shields.io/badge/VS_Code-UV-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=time&config=%7B%22command%22%3A%22uvx%22%2C%22args%22%3A%5B%22mcp-server-time%22%5D%7D) [![Install with UV in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-UV-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=time&config=%7B%22command%22%3A%22uvx%22%2C%22args%22%3A%5B%22mcp-server-time%22%5D%7D&quality=insiders)\n\n[![Install with Docker in VS Code](https://img.shields.io/badge/VS_Code-Docker-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=time&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22mcp%2Ftime%22%5D%7D) [![Install with Docker in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Docker-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=time&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22mcp%2Ftime%22%5D%7D&quality=insiders)\n\nFor manual installation, add the following JSON block to your User Settings 
(JSON) file in VS Code. You can do this by pressing `Ctrl + Shift + P` and typing `Preferences: Open User Settings (JSON)`.\n\nOptionally, you can add it to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others.\n\n> Note that the `mcp` key is needed when using the `mcp.json` file.\n\n<details>\n<summary>Using uvx</summary>\n\n```json\n{\n  \"mcp\": {\n    \"servers\": {\n      \"time\": {\n        \"command\": \"uvx\",\n        \"args\": [\"mcp-server-time\"]\n      }\n    }\n  }\n}\n```\n</details>\n\n<details>\n<summary>Using Docker</summary>\n\n```json\n{\n  \"mcp\": {\n    \"servers\": {\n      \"time\": {\n        \"command\": \"docker\",\n        \"args\": [\"run\", \"-i\", \"--rm\", \"mcp/time\"]\n      }\n    }\n  }\n}\n```\n</details>\n\n### Configure for Zencoder\n\n1. Go to the Zencoder menu (...)\n2. From the dropdown menu, select `Agent Tools`\n3. Click on the `Add Custom MCP`\n4. Add the name and server configuration from below, and make sure to hit the `Install` button\n\n<details>\n<summary>Using uvx</summary>\n\n```json\n{\n    \"command\": \"uvx\",\n    \"args\": [\"mcp-server-time\"]\n  }\n```\n</details>\n\n### Customization - System Timezone\n\nBy default, the server automatically detects your system's timezone. You can override this by adding the argument `--local-timezone` to the `args` list in the configuration.\n\nExample:\n```json\n{\n  \"command\": \"python\",\n  \"args\": [\"-m\", \"mcp_server_time\", \"--local-timezone=America/New_York\"]\n}\n```\n\n## Example Interactions\n\n1. Get current time:\n```json\n{\n  \"name\": \"get_current_time\",\n  \"arguments\": {\n    \"timezone\": \"Europe/Warsaw\"\n  }\n}\n```\nResponse:\n```json\n{\n  \"timezone\": \"Europe/Warsaw\",\n  \"datetime\": \"2024-01-01T13:00:00+01:00\",\n  \"is_dst\": false\n}\n```\n\n2. 
Convert time between timezones:\n```json\n{\n  \"name\": \"convert_time\",\n  \"arguments\": {\n    \"source_timezone\": \"America/New_York\",\n    \"time\": \"16:30\",\n    \"target_timezone\": \"Asia/Tokyo\"\n  }\n}\n```\nResponse:\n```json\n{\n  \"source\": {\n    \"timezone\": \"America/New_York\",\n    \"datetime\": \"2024-01-01T16:30:00-05:00\",\n    \"is_dst\": false\n  },\n  \"target\": {\n    \"timezone\": \"Asia/Tokyo\",\n    \"datetime\": \"2024-01-02T06:30:00+09:00\",\n    \"is_dst\": false\n  },\n  \"time_difference\": \"+14.0h\"\n}\n```\n\n## Debugging\n\nYou can use the MCP inspector to debug the server. For uvx installations:\n\n```bash\nnpx @modelcontextprotocol/inspector uvx mcp-server-time\n```\n\nOr if you've installed the package in a specific directory or are developing on it:\n\n```bash\ncd path/to/servers/src/time\nnpx @modelcontextprotocol/inspector uv run mcp-server-time\n```\n\n## Examples of Questions for Claude\n\n1. \"What time is it now?\" (will use system timezone)\n2. \"What time is it in Tokyo?\"\n3. \"When it's 4 PM in New York, what time is it in London?\"\n4. \"Convert 9:30 AM Tokyo time to New York time\"\n\n## Build\n\nDocker build:\n\n```bash\ncd src/time\ndocker build -t mcp/time .\n```\n\n## Contributing\n\nWe encourage contributions to help expand and improve mcp-server-time. Whether you want to add new time-related tools, enhance existing functionality, or improve documentation, your input is valuable.\n\nFor examples of other MCP servers and implementation patterns, see:\nhttps://github.com/modelcontextprotocol/servers\n\nPull requests are welcome! Feel free to contribute new ideas, bug fixes, or enhancements to make mcp-server-time even more powerful and useful.\n\n## License\n\nmcp-server-time is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. 
For more details, please see the LICENSE file in the project repository.\n"
  },
  {
    "path": "src/time/pyproject.toml",
    "content": "[project]\nname = \"mcp-server-time\"\nversion = \"0.6.2\"\ndescription = \"A Model Context Protocol server providing tools for time queries and timezone conversions for LLMs\"\nreadme = \"README.md\"\nrequires-python = \">=3.10\"\nauthors = [\n    { name = \"Mariusz 'maledorak' Korzekwa\", email = \"mariusz@korzekwa.dev\" },\n]\nkeywords = [\"time\", \"timezone\", \"mcp\", \"llm\"]\nlicense = { text = \"MIT\" }\nclassifiers = [\n    \"Development Status :: 4 - Beta\",\n    \"Intended Audience :: Developers\",\n    \"License :: OSI Approved :: MIT License\",\n    \"Programming Language :: Python :: 3\",\n    \"Programming Language :: Python :: 3.10\",\n]\ndependencies = [\n    \"mcp>=1.23.0\",\n    \"pydantic>=2.0.0\",\n    \"tzdata>=2024.2\",\n    \"tzlocal>=5.3.1\",\n]\n\n[project.scripts]\nmcp-server-time = \"mcp_server_time:main\"\n\n[build-system]\nrequires = [\"hatchling\"]\nbuild-backend = \"hatchling.build\"\n\n[tool.uv]\ndev-dependencies = [\n    \"freezegun>=1.5.1\",\n    \"pyright>=1.1.389\",\n    \"pytest>=8.3.3\",\n    \"ruff>=0.8.1\",\n]\n"
  },
  {
    "path": "src/time/src/mcp_server_time/__init__.py",
    "content": "from .server import serve\n\n\ndef main():\n    \"\"\"MCP Time Server - Time and timezone conversion functionality for MCP\"\"\"\n    import argparse\n    import asyncio\n\n    parser = argparse.ArgumentParser(\n        description=\"give a model the ability to handle time queries and timezone conversions\"\n    )\n    parser.add_argument(\"--local-timezone\", type=str, help=\"Override local timezone\")\n\n    args = parser.parse_args()\n    asyncio.run(serve(args.local_timezone))\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "src/time/src/mcp_server_time/__main__.py",
    "content": "from mcp_server_time import main\n\nmain()\n"
  },
  {
    "path": "src/time/src/mcp_server_time/server.py",
    "content": "from datetime import datetime, timedelta\nfrom enum import Enum\nimport json\nfrom typing import Sequence\n\nfrom zoneinfo import ZoneInfo\nfrom tzlocal import get_localzone_name  # ← returns \"Europe/Paris\", etc.\n\nfrom mcp.server import Server\nfrom mcp.server.stdio import stdio_server\nfrom mcp.types import Tool, ToolAnnotations, TextContent, ImageContent, EmbeddedResource, ErrorData, INVALID_PARAMS\nfrom mcp.shared.exceptions import McpError\n\nfrom pydantic import BaseModel\n\n\nclass TimeTools(str, Enum):\n    GET_CURRENT_TIME = \"get_current_time\"\n    CONVERT_TIME = \"convert_time\"\n\n\nclass TimeResult(BaseModel):\n    timezone: str\n    datetime: str\n    day_of_week: str\n    is_dst: bool\n\n\nclass TimeConversionResult(BaseModel):\n    source: TimeResult\n    target: TimeResult\n    time_difference: str\n\n\nclass TimeConversionInput(BaseModel):\n    source_tz: str\n    time: str\n    target_tz_list: list[str]\n\n\ndef get_local_tz(local_tz_override: str | None = None) -> ZoneInfo:\n    if local_tz_override:\n        return ZoneInfo(local_tz_override)\n\n    # Get local timezone from datetime.now()\n    local_tzname = get_localzone_name()\n    if local_tzname is not None:\n        return ZoneInfo(local_tzname)\n    # Default to UTC if local timezone cannot be determined\n    return ZoneInfo(\"UTC\")\n\n\ndef get_zoneinfo(timezone_name: str) -> ZoneInfo:\n    try:\n        return ZoneInfo(timezone_name)\n    except Exception as e:\n        raise McpError(ErrorData(code=INVALID_PARAMS, message=f\"Invalid timezone: {str(e)}\"))\n\n\nclass TimeServer:\n    def get_current_time(self, timezone_name: str) -> TimeResult:\n        \"\"\"Get current time in specified timezone\"\"\"\n        timezone = get_zoneinfo(timezone_name)\n        current_time = datetime.now(timezone)\n\n        return TimeResult(\n            timezone=timezone_name,\n            datetime=current_time.isoformat(timespec=\"seconds\"),\n            
day_of_week=current_time.strftime(\"%A\"),\n            is_dst=bool(current_time.dst()),\n        )\n\n    def convert_time(\n        self, source_tz: str, time_str: str, target_tz: str\n    ) -> TimeConversionResult:\n        \"\"\"Convert time between timezones\"\"\"\n        source_timezone = get_zoneinfo(source_tz)\n        target_timezone = get_zoneinfo(target_tz)\n\n        try:\n            parsed_time = datetime.strptime(time_str, \"%H:%M\").time()\n        except ValueError:\n            raise ValueError(\"Invalid time format. Expected HH:MM [24-hour format]\")\n\n        now = datetime.now(source_timezone)\n        source_time = datetime(\n            now.year,\n            now.month,\n            now.day,\n            parsed_time.hour,\n            parsed_time.minute,\n            tzinfo=source_timezone,\n        )\n\n        target_time = source_time.astimezone(target_timezone)\n        source_offset = source_time.utcoffset() or timedelta()\n        target_offset = target_time.utcoffset() or timedelta()\n        hours_difference = (target_offset - source_offset).total_seconds() / 3600\n\n        if hours_difference.is_integer():\n            time_diff_str = f\"{hours_difference:+.1f}h\"\n        else:\n            # For fractional hours like Nepal's UTC+5:45\n            time_diff_str = f\"{hours_difference:+.2f}\".rstrip(\"0\").rstrip(\".\") + \"h\"\n\n        return TimeConversionResult(\n            source=TimeResult(\n                timezone=source_tz,\n                datetime=source_time.isoformat(timespec=\"seconds\"),\n                day_of_week=source_time.strftime(\"%A\"),\n                is_dst=bool(source_time.dst()),\n            ),\n            target=TimeResult(\n                timezone=target_tz,\n                datetime=target_time.isoformat(timespec=\"seconds\"),\n                day_of_week=target_time.strftime(\"%A\"),\n                is_dst=bool(target_time.dst()),\n            ),\n            time_difference=time_diff_str,\n  
      )\n\n\nasync def serve(local_timezone: str | None = None) -> None:\n    server = Server(\"mcp-time\")\n    time_server = TimeServer()\n    local_tz = str(get_local_tz(local_timezone))\n\n    @server.list_tools()\n    async def list_tools() -> list[Tool]:\n        \"\"\"List available time tools.\"\"\"\n        return [\n            Tool(\n                name=TimeTools.GET_CURRENT_TIME.value,\n                description=\"Get current time in a specific timezones\",\n                inputSchema={\n                    \"type\": \"object\",\n                    \"properties\": {\n                        \"timezone\": {\n                            \"type\": \"string\",\n                            \"description\": f\"IANA timezone name (e.g., 'America/New_York', 'Europe/London'). Use '{local_tz}' as local timezone if no timezone provided by the user.\",\n                        }\n                    },\n                    \"required\": [\"timezone\"],\n                },\n                annotations=ToolAnnotations(\n                    readOnlyHint=True,\n                    destructiveHint=False,\n                    idempotentHint=True,\n                    openWorldHint=False,\n                ),\n            ),\n            Tool(\n                name=TimeTools.CONVERT_TIME.value,\n                description=\"Convert time between timezones\",\n                inputSchema={\n                    \"type\": \"object\",\n                    \"properties\": {\n                        \"source_timezone\": {\n                            \"type\": \"string\",\n                            \"description\": f\"Source IANA timezone name (e.g., 'America/New_York', 'Europe/London'). 
Use '{local_tz}' as local timezone if no source timezone provided by the user.\",\n                        },\n                        \"time\": {\n                            \"type\": \"string\",\n                            \"description\": \"Time to convert in 24-hour format (HH:MM)\",\n                        },\n                        \"target_timezone\": {\n                            \"type\": \"string\",\n                            \"description\": f\"Target IANA timezone name (e.g., 'Asia/Tokyo', 'America/San_Francisco'). Use '{local_tz}' as local timezone if no target timezone provided by the user.\",\n                        },\n                    },\n                    \"required\": [\"source_timezone\", \"time\", \"target_timezone\"],\n                },\n                annotations=ToolAnnotations(\n                    readOnlyHint=True,\n                    destructiveHint=False,\n                    idempotentHint=True,\n                    openWorldHint=False,\n                ),\n            ),\n        ]\n\n    @server.call_tool()\n    async def call_tool(\n        name: str, arguments: dict\n    ) -> Sequence[TextContent | ImageContent | EmbeddedResource]:\n        \"\"\"Handle tool calls for time queries.\"\"\"\n        try:\n            match name:\n                case TimeTools.GET_CURRENT_TIME.value:\n                    timezone = arguments.get(\"timezone\")\n                    if not timezone:\n                        raise ValueError(\"Missing required argument: timezone\")\n\n                    result = time_server.get_current_time(timezone)\n\n                case TimeTools.CONVERT_TIME.value:\n                    if not all(\n                        k in arguments\n                        for k in [\"source_timezone\", \"time\", \"target_timezone\"]\n                    ):\n                        raise ValueError(\"Missing required arguments\")\n\n                    result = time_server.convert_time(\n                        
arguments[\"source_timezone\"],\n                        arguments[\"time\"],\n                        arguments[\"target_timezone\"],\n                    )\n                case _:\n                    raise ValueError(f\"Unknown tool: {name}\")\n\n            return [\n                TextContent(type=\"text\", text=json.dumps(result.model_dump(), indent=2))\n            ]\n\n        except Exception as e:\n            raise ValueError(f\"Error processing mcp-server-time query: {str(e)}\")\n\n    options = server.create_initialization_options()\n    async with stdio_server() as (read_stream, write_stream):\n        await server.run(read_stream, write_stream, options)\n"
  },
  {
    "path": "src/time/test/time_server_test.py",
    "content": "\nfrom freezegun import freeze_time\nfrom mcp.shared.exceptions import McpError\nimport pytest\nfrom unittest.mock import patch\nfrom zoneinfo import ZoneInfo\n\nfrom mcp_server_time.server import TimeServer, get_local_tz\n\n\n@pytest.mark.parametrize(\n    \"test_time,timezone,expected\",\n    [\n        # UTC+1 non-DST\n        (\n            \"2024-01-01 12:00:00+00:00\",\n            \"Europe/Warsaw\",\n            {\n                \"timezone\": \"Europe/Warsaw\",\n                \"datetime\": \"2024-01-01T13:00:00+01:00\",\n                \"is_dst\": False,\n            },\n        ),\n        # UTC non-DST\n        (\n            \"2024-01-01 12:00:00+00:00\",\n            \"Europe/London\",\n            {\n                \"timezone\": \"Europe/London\",\n                \"datetime\": \"2024-01-01T12:00:00+00:00\",\n                \"is_dst\": False,\n            },\n        ),\n        # UTC-5 non-DST\n        (\n            \"2024-01-01 12:00:00-00:00\",\n            \"America/New_York\",\n            {\n                \"timezone\": \"America/New_York\",\n                \"datetime\": \"2024-01-01T07:00:00-05:00\",\n                \"is_dst\": False,\n            },\n        ),\n        # UTC+1 DST\n        (\n            \"2024-03-31 12:00:00+00:00\",\n            \"Europe/Warsaw\",\n            {\n                \"timezone\": \"Europe/Warsaw\",\n                \"datetime\": \"2024-03-31T14:00:00+02:00\",\n                \"is_dst\": True,\n            },\n        ),\n        # UTC DST\n        (\n            \"2024-03-31 12:00:00+00:00\",\n            \"Europe/London\",\n            {\n                \"timezone\": \"Europe/London\",\n                \"datetime\": \"2024-03-31T13:00:00+01:00\",\n                \"is_dst\": True,\n            },\n        ),\n        # UTC-5 DST\n        (\n            \"2024-03-31 12:00:00-00:00\",\n            \"America/New_York\",\n            {\n                \"timezone\": 
\"America/New_York\",\n                \"datetime\": \"2024-03-31T08:00:00-04:00\",\n                \"is_dst\": True,\n            },\n        ),\n    ],\n)\ndef test_get_current_time(test_time, timezone, expected):\n    with freeze_time(test_time):\n        time_server = TimeServer()\n        result = time_server.get_current_time(timezone)\n        assert result.timezone == expected[\"timezone\"]\n        assert result.datetime == expected[\"datetime\"]\n        assert result.is_dst == expected[\"is_dst\"]\n\n\ndef test_get_current_time_with_invalid_timezone():\n    time_server = TimeServer()\n    with pytest.raises(\n        McpError,\n        match=r\"Invalid timezone: 'No time zone found with key Invalid/Timezone'\",\n    ):\n        time_server.get_current_time(\"Invalid/Timezone\")\n\n\n@pytest.mark.parametrize(\n    \"source_tz,time_str,target_tz,expected_error\",\n    [\n        (\n            \"invalid_tz\",\n            \"12:00\",\n            \"Europe/London\",\n            \"Invalid timezone: 'No time zone found with key invalid_tz'\",\n        ),\n        (\n            \"Europe/Warsaw\",\n            \"12:00\",\n            \"invalid_tz\",\n            \"Invalid timezone: 'No time zone found with key invalid_tz'\",\n        ),\n        (\n            \"Europe/Warsaw\",\n            \"25:00\",\n            \"Europe/London\",\n            \"Invalid time format. 
Expected HH:MM [24-hour format]\",\n        ),\n    ],\n)\ndef test_convert_time_errors(source_tz, time_str, target_tz, expected_error):\n    time_server = TimeServer()\n    with pytest.raises((McpError, ValueError), match=expected_error):\n        time_server.convert_time(source_tz, time_str, target_tz)\n\n\n@pytest.mark.parametrize(\n    \"test_time,source_tz,time_str,target_tz,expected\",\n    [\n        # Basic case: Standard time conversion between Warsaw and London (1 hour difference)\n        # Warsaw is UTC+1, London is UTC+0\n        (\n            \"2024-01-01 00:00:00+00:00\",\n            \"Europe/Warsaw\",\n            \"12:00\",\n            \"Europe/London\",\n            {\n                \"source\": {\n                    \"timezone\": \"Europe/Warsaw\",\n                    \"datetime\": \"2024-01-01T12:00:00+01:00\",\n                    \"is_dst\": False,\n                },\n                \"target\": {\n                    \"timezone\": \"Europe/London\",\n                    \"datetime\": \"2024-01-01T11:00:00+00:00\",\n                    \"is_dst\": False,\n                },\n                \"time_difference\": \"-1.0h\",\n            },\n        ),\n        # Reverse case of above: London to Warsaw conversion\n        # Shows how time difference is positive when going east\n        (\n            \"2024-01-01 00:00:00+00:00\",\n            \"Europe/London\",\n            \"12:00\",\n            \"Europe/Warsaw\",\n            {\n                \"source\": {\n                    \"timezone\": \"Europe/London\",\n                    \"datetime\": \"2024-01-01T12:00:00+00:00\",\n                    \"is_dst\": False,\n                },\n                \"target\": {\n                    \"timezone\": \"Europe/Warsaw\",\n                    \"datetime\": \"2024-01-01T13:00:00+01:00\",\n                    \"is_dst\": False,\n                },\n                \"time_difference\": \"+1.0h\",\n            },\n        ),\n        # Edge 
case: Different DST periods between Europe and USA\n        # Europe ends DST on Oct 27, while USA waits until Nov 3\n        # This creates a one-week period where Europe is in standard time but USA still observes DST\n        (\n            \"2024-10-28 00:00:00+00:00\",\n            \"Europe/Warsaw\",\n            \"12:00\",\n            \"America/New_York\",\n            {\n                \"source\": {\n                    \"timezone\": \"Europe/Warsaw\",\n                    \"datetime\": \"2024-10-28T12:00:00+01:00\",\n                    \"is_dst\": False,\n                },\n                \"target\": {\n                    \"timezone\": \"America/New_York\",\n                    \"datetime\": \"2024-10-28T07:00:00-04:00\",\n                    \"is_dst\": True,\n                },\n                \"time_difference\": \"-5.0h\",\n            },\n        ),\n        # Follow-up to previous case: After both regions end DST\n        # Shows how time difference increases by 1 hour when USA also ends DST\n        (\n            \"2024-11-04 00:00:00+00:00\",\n            \"Europe/Warsaw\",\n            \"12:00\",\n            \"America/New_York\",\n            {\n                \"source\": {\n                    \"timezone\": \"Europe/Warsaw\",\n                    \"datetime\": \"2024-11-04T12:00:00+01:00\",\n                    \"is_dst\": False,\n                },\n                \"target\": {\n                    \"timezone\": \"America/New_York\",\n                    \"datetime\": \"2024-11-04T06:00:00-05:00\",\n                    \"is_dst\": False,\n                },\n                \"time_difference\": \"-6.0h\",\n            },\n        ),\n        # Edge case: Nepal's unusual UTC+5:45 offset\n        # One of the few time zones using 45-minute offset\n        (\n            \"2024-01-01 00:00:00+00:00\",\n            \"Europe/Warsaw\",\n            \"12:00\",\n            \"Asia/Kathmandu\",\n            {\n                \"source\": {\n     
               \"timezone\": \"Europe/Warsaw\",\n                    \"datetime\": \"2024-01-01T12:00:00+01:00\",\n                    \"is_dst\": False,\n                },\n                \"target\": {\n                    \"timezone\": \"Asia/Kathmandu\",\n                    \"datetime\": \"2024-01-01T16:45:00+05:45\",\n                    \"is_dst\": False,\n                },\n                \"time_difference\": \"+4.75h\",\n            },\n        ),\n        # Reverse case for Nepal\n        # Demonstrates how 45-minute offset works in opposite direction\n        (\n            \"2024-01-01 00:00:00+00:00\",\n            \"Asia/Kathmandu\",\n            \"12:00\",\n            \"Europe/Warsaw\",\n            {\n                \"source\": {\n                    \"timezone\": \"Asia/Kathmandu\",\n                    \"datetime\": \"2024-01-01T12:00:00+05:45\",\n                    \"is_dst\": False,\n                },\n                \"target\": {\n                    \"timezone\": \"Europe/Warsaw\",\n                    \"datetime\": \"2024-01-01T07:15:00+01:00\",\n                    \"is_dst\": False,\n                },\n                \"time_difference\": \"-4.75h\",\n            },\n        ),\n        # Edge case: Lord Howe Island's unique DST rules\n        # One of the few places using 30-minute DST shift\n        # During summer (DST), they use UTC+11\n        (\n            \"2024-01-01 00:00:00+00:00\",\n            \"Europe/Warsaw\",\n            \"12:00\",\n            \"Australia/Lord_Howe\",\n            {\n                \"source\": {\n                    \"timezone\": \"Europe/Warsaw\",\n                    \"datetime\": \"2024-01-01T12:00:00+01:00\",\n                    \"is_dst\": False,\n                },\n                \"target\": {\n                    \"timezone\": \"Australia/Lord_Howe\",\n                    \"datetime\": \"2024-01-01T22:00:00+11:00\",\n                    \"is_dst\": True,\n                },\n            
    \"time_difference\": \"+10.0h\",\n            },\n        ),\n        # Second Lord Howe Island case: During their standard time\n        # Shows transition to UTC+10:30 after DST ends\n        (\n            \"2024-04-07 00:00:00+00:00\",\n            \"Europe/Warsaw\",\n            \"12:00\",\n            \"Australia/Lord_Howe\",\n            {\n                \"source\": {\n                    \"timezone\": \"Europe/Warsaw\",\n                    \"datetime\": \"2024-04-07T12:00:00+02:00\",\n                    \"is_dst\": True,\n                },\n                \"target\": {\n                    \"timezone\": \"Australia/Lord_Howe\",\n                    \"datetime\": \"2024-04-07T20:30:00+10:30\",\n                    \"is_dst\": False,\n                },\n                \"time_difference\": \"+8.5h\",\n            },\n        ),\n        # Edge case: Date line crossing with Samoa\n        # Demonstrates how a single time conversion can result in a date change\n        # Samoa is UTC+13, creating almost a full day difference with Warsaw\n        (\n            \"2024-01-01 00:00:00+00:00\",\n            \"Europe/Warsaw\",\n            \"23:00\",\n            \"Pacific/Apia\",\n            {\n                \"source\": {\n                    \"timezone\": \"Europe/Warsaw\",\n                    \"datetime\": \"2024-01-01T23:00:00+01:00\",\n                    \"is_dst\": False,\n                },\n                \"target\": {\n                    \"timezone\": \"Pacific/Apia\",\n                    \"datetime\": \"2024-01-02T11:00:00+13:00\",\n                    \"is_dst\": False,\n                },\n                \"time_difference\": \"+12.0h\",\n            },\n        ),\n        # Edge case: Iran's unusual half-hour offset\n        # Demonstrates conversion with Iran's UTC+3:30 timezone\n        (\n            \"2024-03-21 00:00:00+00:00\",\n            \"Europe/Warsaw\",\n            \"12:00\",\n            \"Asia/Tehran\",\n            
{\n                \"source\": {\n                    \"timezone\": \"Europe/Warsaw\",\n                    \"datetime\": \"2024-03-21T12:00:00+01:00\",\n                    \"is_dst\": False,\n                },\n                \"target\": {\n                    \"timezone\": \"Asia/Tehran\",\n                    \"datetime\": \"2024-03-21T14:30:00+03:30\",\n                    \"is_dst\": False,\n                },\n                \"time_difference\": \"+2.5h\",\n            },\n        ),\n        # Edge case: Venezuela's unusual -4:30 offset (historical)\n        # In 2016, Venezuela moved from -4:30 to -4:00\n        # Useful for testing historical dates\n        (\n            \"2016-04-30 00:00:00+00:00\",  # Just before the change\n            \"Europe/Warsaw\",\n            \"12:00\",\n            \"America/Caracas\",\n            {\n                \"source\": {\n                    \"timezone\": \"Europe/Warsaw\",\n                    \"datetime\": \"2016-04-30T12:00:00+02:00\",\n                    \"is_dst\": True,\n                },\n                \"target\": {\n                    \"timezone\": \"America/Caracas\",\n                    \"datetime\": \"2016-04-30T05:30:00-04:30\",\n                    \"is_dst\": False,\n                },\n                \"time_difference\": \"-6.5h\",\n            },\n        ),\n        # Edge case: Israel's variable DST\n        # Israel's DST changes don't follow a fixed pattern\n        # They often change dates year-to-year based on Hebrew calendar\n        (\n            \"2024-10-27 00:00:00+00:00\",\n            \"Europe/Warsaw\",\n            \"12:00\",\n            \"Asia/Jerusalem\",\n            {\n                \"source\": {\n                    \"timezone\": \"Europe/Warsaw\",\n                    \"datetime\": \"2024-10-27T12:00:00+01:00\",\n                    \"is_dst\": False,\n                },\n                \"target\": {\n                    \"timezone\": \"Asia/Jerusalem\",\n         
           \"datetime\": \"2024-10-27T13:00:00+02:00\",\n                    \"is_dst\": False,\n                },\n                \"time_difference\": \"+1.0h\",\n            },\n        ),\n        # Edge case: Antarctica/Troll station\n        # Only timezone that uses UTC+0 in winter and UTC+2 in summer\n        # One of the few zones with exactly 2 hours DST difference\n        (\n            \"2024-03-31 00:00:00+00:00\",\n            \"Europe/Warsaw\",\n            \"12:00\",\n            \"Antarctica/Troll\",\n            {\n                \"source\": {\n                    \"timezone\": \"Europe/Warsaw\",\n                    \"datetime\": \"2024-03-31T12:00:00+02:00\",\n                    \"is_dst\": True,\n                },\n                \"target\": {\n                    \"timezone\": \"Antarctica/Troll\",\n                    \"datetime\": \"2024-03-31T12:00:00+02:00\",\n                    \"is_dst\": True,\n                },\n                \"time_difference\": \"+0.0h\",\n            },\n        ),\n        # Edge case: Kiribati date line anomaly\n        # After skipping Dec 31, 1994, eastern Kiribati is UTC+14\n        # The furthest forward timezone in the world\n        (\n            \"2024-01-01 00:00:00+00:00\",\n            \"Europe/Warsaw\",\n            \"23:00\",\n            \"Pacific/Kiritimati\",\n            {\n                \"source\": {\n                    \"timezone\": \"Europe/Warsaw\",\n                    \"datetime\": \"2024-01-01T23:00:00+01:00\",\n                    \"is_dst\": False,\n                },\n                \"target\": {\n                    \"timezone\": \"Pacific/Kiritimati\",\n                    \"datetime\": \"2024-01-02T12:00:00+14:00\",\n                    \"is_dst\": False,\n                },\n                \"time_difference\": \"+13.0h\",\n            },\n        ),\n        # Edge case: Chatham Islands, New Zealand\n        # Uses unusual 45-minute offset AND observes DST\n        # 
UTC+12:45 in standard time, UTC+13:45 in DST\n        (\n            \"2024-01-01 00:00:00+00:00\",\n            \"Europe/Warsaw\",\n            \"12:00\",\n            \"Pacific/Chatham\",\n            {\n                \"source\": {\n                    \"timezone\": \"Europe/Warsaw\",\n                    \"datetime\": \"2024-01-01T12:00:00+01:00\",\n                    \"is_dst\": False,\n                },\n                \"target\": {\n                    \"timezone\": \"Pacific/Chatham\",\n                    \"datetime\": \"2024-01-02T00:45:00+13:45\",\n                    \"is_dst\": True,\n                },\n                \"time_difference\": \"+12.75h\",\n            },\n        ),\n    ],\n)\ndef test_convert_time(test_time, source_tz, time_str, target_tz, expected):\n    with freeze_time(test_time):\n        time_server = TimeServer()\n        result = time_server.convert_time(source_tz, time_str, target_tz)\n\n        assert result.source.timezone == expected[\"source\"][\"timezone\"]\n        assert result.target.timezone == expected[\"target\"][\"timezone\"]\n        assert result.source.datetime == expected[\"source\"][\"datetime\"]\n        assert result.target.datetime == expected[\"target\"][\"datetime\"]\n        assert result.source.is_dst == expected[\"source\"][\"is_dst\"]\n        assert result.target.is_dst == expected[\"target\"][\"is_dst\"]\n        assert result.time_difference == expected[\"time_difference\"]\n\n\ndef test_get_local_tz_with_override():\n    \"\"\"Test that timezone override works correctly.\"\"\"\n    result = get_local_tz(\"America/New_York\")\n    assert str(result) == \"America/New_York\"\n    assert isinstance(result, ZoneInfo)\n\n\ndef test_get_local_tz_with_invalid_override():\n    \"\"\"Test that invalid timezone override raises an error.\"\"\"\n    with pytest.raises(Exception):  # ZoneInfo will raise an exception\n        
get_local_tz(\"Invalid/Timezone\")\n\n\n@patch('mcp_server_time.server.get_localzone_name')\ndef test_get_local_tz_with_valid_iana_name(mock_get_localzone):\n    \"\"\"Test that valid IANA timezone names from tzlocal work correctly.\"\"\"\n    mock_get_localzone.return_value = \"Europe/London\"\n    result = get_local_tz()\n    assert str(result) == \"Europe/London\"\n    assert isinstance(result, ZoneInfo)\n\n\n@patch('mcp_server_time.server.get_localzone_name')\ndef test_get_local_tz_when_none_returned(mock_get_localzone):\n    \"\"\"Test default to UTC when tzlocal returns None.\"\"\"\n    mock_get_localzone.return_value = None\n    result = get_local_tz()\n    assert str(result) == \"UTC\"\n\n\n@patch('mcp_server_time.server.get_localzone_name')\ndef test_get_local_tz_handles_windows_timezones(mock_get_localzone):\n    \"\"\"Test that tzlocal properly handles Windows timezone names.\n    \n    Note: tzlocal should convert Windows names like 'Pacific Standard Time'\n    to proper IANA names like 'America/Los_Angeles'.\n    \"\"\"\n    # tzlocal should return IANA names even on Windows\n    mock_get_localzone.return_value = \"America/Los_Angeles\"\n    result = get_local_tz()\n    assert str(result) == \"America/Los_Angeles\"\n    assert isinstance(result, ZoneInfo)\n\n\n@pytest.mark.parametrize(\n    \"timezone_name\",\n    [\n        \"America/New_York\",\n        \"Europe/Paris\", \n        \"Asia/Tokyo\",\n        \"Australia/Sydney\",\n        \"Africa/Cairo\",\n        \"America/Sao_Paulo\",\n        \"Pacific/Auckland\",\n        \"UTC\",\n    ],\n)\n@patch('mcp_server_time.server.get_localzone_name')\ndef test_get_local_tz_various_timezones(mock_get_localzone, timezone_name):\n    \"\"\"Test various timezone names that tzlocal might return.\"\"\"\n    mock_get_localzone.return_value = timezone_name\n    result = get_local_tz()\n    assert str(result) == timezone_name\n    assert isinstance(result, ZoneInfo)\n"
  },
  {
    "path": "tsconfig.json",
    "content": "{\n  \"compilerOptions\": {\n    \"target\": \"ES2022\",\n    \"module\": \"Node16\",\n    \"moduleResolution\": \"Node16\",\n    \"strict\": true,\n    \"esModuleInterop\": true,\n    \"skipLibCheck\": true,\n    \"forceConsistentCasingInFileNames\": true,\n    \"resolveJsonModule\": true\n  },\n  \"include\": [\"src/**/*\"],\n  \"exclude\": [\"node_modules\"]\n}\n"
  }
]