Repository: anthropics/claude-code Branch: main Commit: 16536693ecc0 Files: 183 Total size: 1.0 MB Directory structure: gitextract_l9ej6kic/ ├── .claude/ │ └── commands/ │ ├── commit-push-pr.md │ ├── dedupe.md │ └── triage-issue.md ├── .claude-plugin/ │ └── marketplace.json ├── .devcontainer/ │ ├── Dockerfile │ ├── devcontainer.json │ └── init-firewall.sh ├── .gitattributes ├── .github/ │ ├── ISSUE_TEMPLATE/ │ │ ├── bug_report.yml │ │ ├── config.yml │ │ ├── documentation.yml │ │ ├── feature_request.yml │ │ └── model_behavior.yml │ └── workflows/ │ ├── auto-close-duplicates.yml │ ├── backfill-duplicate-comments.yml │ ├── claude-dedupe-issues.yml │ ├── claude-issue-triage.yml │ ├── claude.yml │ ├── issue-lifecycle-comment.yml │ ├── issue-opened-dispatch.yml │ ├── lock-closed-issues.yml │ ├── log-issue-events.yml │ ├── non-write-users-check.yml │ ├── remove-autoclose-label.yml │ └── sweep.yml ├── .gitignore ├── .vscode/ │ └── extensions.json ├── CHANGELOG.md ├── LICENSE.md ├── README.md ├── SECURITY.md ├── Script/ │ └── run_devcontainer_claude_code.ps1 ├── examples/ │ ├── hooks/ │ │ └── bash_command_validator_example.py │ └── settings/ │ ├── README.md │ ├── settings-bash-sandbox.json │ ├── settings-lax.json │ └── settings-strict.json ├── plugins/ │ ├── README.md │ ├── agent-sdk-dev/ │ │ ├── .claude-plugin/ │ │ │ └── plugin.json │ │ ├── README.md │ │ ├── agents/ │ │ │ ├── agent-sdk-verifier-py.md │ │ │ └── agent-sdk-verifier-ts.md │ │ └── commands/ │ │ └── new-sdk-app.md │ ├── claude-opus-4-5-migration/ │ │ ├── .claude-plugin/ │ │ │ └── plugin.json │ │ ├── README.md │ │ └── skills/ │ │ └── claude-opus-4-5-migration/ │ │ ├── SKILL.md │ │ └── references/ │ │ ├── effort.md │ │ └── prompt-snippets.md │ ├── code-review/ │ │ ├── .claude-plugin/ │ │ │ └── plugin.json │ │ ├── README.md │ │ └── commands/ │ │ └── code-review.md │ ├── commit-commands/ │ │ ├── .claude-plugin/ │ │ │ └── plugin.json │ │ ├── README.md │ │ └── commands/ │ │ ├── clean_gone.md │ │ ├── commit-push-pr.md 
│ │ └── commit.md │ ├── explanatory-output-style/ │ │ ├── .claude-plugin/ │ │ │ └── plugin.json │ │ ├── README.md │ │ ├── hooks/ │ │ │ └── hooks.json │ │ └── hooks-handlers/ │ │ └── session-start.sh │ ├── feature-dev/ │ │ ├── .claude-plugin/ │ │ │ └── plugin.json │ │ ├── README.md │ │ ├── agents/ │ │ │ ├── code-architect.md │ │ │ ├── code-explorer.md │ │ │ └── code-reviewer.md │ │ └── commands/ │ │ └── feature-dev.md │ ├── frontend-design/ │ │ ├── .claude-plugin/ │ │ │ └── plugin.json │ │ ├── README.md │ │ └── skills/ │ │ └── frontend-design/ │ │ └── SKILL.md │ ├── hookify/ │ │ ├── .claude-plugin/ │ │ │ └── plugin.json │ │ ├── .gitignore │ │ ├── README.md │ │ ├── agents/ │ │ │ └── conversation-analyzer.md │ │ ├── commands/ │ │ │ ├── configure.md │ │ │ ├── help.md │ │ │ ├── hookify.md │ │ │ └── list.md │ │ ├── core/ │ │ │ ├── __init__.py │ │ │ ├── config_loader.py │ │ │ └── rule_engine.py │ │ ├── examples/ │ │ │ ├── console-log-warning.local.md │ │ │ ├── dangerous-rm.local.md │ │ │ ├── require-tests-stop.local.md │ │ │ └── sensitive-files-warning.local.md │ │ ├── hooks/ │ │ │ ├── __init__.py │ │ │ ├── hooks.json │ │ │ ├── posttooluse.py │ │ │ ├── pretooluse.py │ │ │ ├── stop.py │ │ │ └── userpromptsubmit.py │ │ ├── matchers/ │ │ │ └── __init__.py │ │ ├── skills/ │ │ │ └── writing-rules/ │ │ │ └── SKILL.md │ │ └── utils/ │ │ └── __init__.py │ ├── learning-output-style/ │ │ ├── .claude-plugin/ │ │ │ └── plugin.json │ │ ├── README.md │ │ ├── hooks/ │ │ │ └── hooks.json │ │ └── hooks-handlers/ │ │ └── session-start.sh │ ├── plugin-dev/ │ │ ├── README.md │ │ ├── agents/ │ │ │ ├── agent-creator.md │ │ │ ├── plugin-validator.md │ │ │ └── skill-reviewer.md │ │ ├── commands/ │ │ │ └── create-plugin.md │ │ └── skills/ │ │ ├── agent-development/ │ │ │ ├── SKILL.md │ │ │ ├── examples/ │ │ │ │ ├── agent-creation-prompt.md │ │ │ │ └── complete-agent-examples.md │ │ │ ├── references/ │ │ │ │ ├── agent-creation-system-prompt.md │ │ │ │ ├── system-prompt-design.md │ │ │ │ └── 
triggering-examples.md │ │ │ └── scripts/ │ │ │ └── validate-agent.sh │ │ ├── command-development/ │ │ │ ├── README.md │ │ │ ├── SKILL.md │ │ │ ├── examples/ │ │ │ │ ├── plugin-commands.md │ │ │ │ └── simple-commands.md │ │ │ └── references/ │ │ │ ├── advanced-workflows.md │ │ │ ├── documentation-patterns.md │ │ │ ├── frontmatter-reference.md │ │ │ ├── interactive-commands.md │ │ │ ├── marketplace-considerations.md │ │ │ ├── plugin-features-reference.md │ │ │ └── testing-strategies.md │ │ ├── hook-development/ │ │ │ ├── SKILL.md │ │ │ ├── examples/ │ │ │ │ ├── load-context.sh │ │ │ │ ├── validate-bash.sh │ │ │ │ └── validate-write.sh │ │ │ ├── references/ │ │ │ │ ├── advanced.md │ │ │ │ ├── migration.md │ │ │ │ └── patterns.md │ │ │ └── scripts/ │ │ │ ├── README.md │ │ │ ├── hook-linter.sh │ │ │ ├── test-hook.sh │ │ │ └── validate-hook-schema.sh │ │ ├── mcp-integration/ │ │ │ ├── SKILL.md │ │ │ ├── examples/ │ │ │ │ ├── http-server.json │ │ │ │ ├── sse-server.json │ │ │ │ └── stdio-server.json │ │ │ └── references/ │ │ │ ├── authentication.md │ │ │ ├── server-types.md │ │ │ └── tool-usage.md │ │ ├── plugin-settings/ │ │ │ ├── SKILL.md │ │ │ ├── examples/ │ │ │ │ ├── create-settings-command.md │ │ │ │ ├── example-settings.md │ │ │ │ └── read-settings-hook.sh │ │ │ ├── references/ │ │ │ │ ├── parsing-techniques.md │ │ │ │ └── real-world-examples.md │ │ │ └── scripts/ │ │ │ ├── parse-frontmatter.sh │ │ │ └── validate-settings.sh │ │ ├── plugin-structure/ │ │ │ ├── README.md │ │ │ ├── SKILL.md │ │ │ ├── examples/ │ │ │ │ ├── advanced-plugin.md │ │ │ │ ├── minimal-plugin.md │ │ │ │ └── standard-plugin.md │ │ │ └── references/ │ │ │ ├── component-patterns.md │ │ │ └── manifest-reference.md │ │ └── skill-development/ │ │ ├── SKILL.md │ │ └── references/ │ │ └── skill-creator-original.md │ ├── pr-review-toolkit/ │ │ ├── .claude-plugin/ │ │ │ └── plugin.json │ │ ├── README.md │ │ ├── agents/ │ │ │ ├── code-reviewer.md │ │ │ ├── code-simplifier.md │ │ │ ├── 
comment-analyzer.md │ │ │ ├── pr-test-analyzer.md │ │ │ ├── silent-failure-hunter.md │ │ │ └── type-design-analyzer.md │ │ └── commands/ │ │ └── review-pr.md │ ├── ralph-wiggum/ │ │ ├── .claude-plugin/ │ │ │ └── plugin.json │ │ ├── README.md │ │ ├── commands/ │ │ │ ├── cancel-ralph.md │ │ │ ├── help.md │ │ │ └── ralph-loop.md │ │ ├── hooks/ │ │ │ ├── hooks.json │ │ │ └── stop-hook.sh │ │ └── scripts/ │ │ └── setup-ralph-loop.sh │ └── security-guidance/ │ ├── .claude-plugin/ │ │ └── plugin.json │ └── hooks/ │ ├── hooks.json │ └── security_reminder_hook.py └── scripts/ ├── auto-close-duplicates.ts ├── backfill-duplicate-comments.ts ├── comment-on-duplicates.sh ├── edit-issue-labels.sh ├── gh.sh ├── issue-lifecycle.ts ├── lifecycle-comment.ts └── sweep.ts ================================================ FILE CONTENTS ================================================ ================================================ FILE: .claude/commands/commit-push-pr.md ================================================ --- allowed-tools: Bash(git checkout --branch:*), Bash(git add:*), Bash(git status:*), Bash(git push:*), Bash(git commit:*), Bash(gh pr create:*) description: Commit, push, and open a PR --- ## Context - Current git status: !`git status` - Current git diff (staged and unstaged changes): !`git diff HEAD` - Current branch: !`git branch --show-current` ## Your task Based on the above changes: 1. Create a new branch if on main 2. Create a single commit with an appropriate message 3. Push the branch to origin 4. Create a pull request using `gh pr create` 5. You have the capability to call multiple tools in a single response. You MUST do all of the above in a single message. Do not use any other tools or do anything else. Do not send any other text or messages besides these tool calls. 
================================================ FILE: .claude/commands/dedupe.md ================================================ --- allowed-tools: Bash(./scripts/gh.sh:*), Bash(./scripts/comment-on-duplicates.sh:*) description: Find duplicate GitHub issues --- Find up to 3 likely duplicate issues for a given GitHub issue. To do this, follow these steps precisely: 1. Use an agent to check if the Github issue (a) is closed, (b) does not need to be deduped (eg. because it is broad product feedback without a specific solution, or positive feedback), or (c) already has a duplicates comment that you made earlier. If so, do not proceed. 2. Use an agent to view a Github issue, and ask the agent to return a summary of the issue 3. Then, launch 5 parallel agents to search Github for duplicates of this issue, using diverse keywords and search approaches, using the summary from #1 4. Next, feed the results from #1 and #2 into another agent, so that it can filter out false positives, that are likely not actually duplicates of the original issue. If there are no duplicates remaining, do not proceed. 5. Finally, use the comment script to post duplicates: ``` ./scripts/comment-on-duplicates.sh --base-issue --potential-duplicates ``` Notes (be sure to tell this to your agents, too): - Use `./scripts/gh.sh` to interact with Github, rather than web fetch or raw `gh`. Examples: - `./scripts/gh.sh issue view 123` — view an issue - `./scripts/gh.sh issue view 123 --comments` — view with comments - `./scripts/gh.sh issue list --state open --limit 20` — list issues - `./scripts/gh.sh search issues "query" --limit 10` — search for issues - Do not use other tools, beyond `./scripts/gh.sh` and the comment script (eg. don't use other MCP servers, file edit, etc.) 
- Make a todo list first ================================================ FILE: .claude/commands/triage-issue.md ================================================ --- allowed-tools: Bash(./scripts/gh.sh:*),Bash(./scripts/edit-issue-labels.sh:*) description: Triage GitHub issues by analyzing and applying labels --- You're an issue triage assistant. Analyze the issue and manage labels. IMPORTANT: Don't post any comments or messages to the issue. Your only actions are adding or removing labels. Context: $ARGUMENTS TOOLS: - `./scripts/gh.sh` — wrapper for `gh` CLI. Only supports these subcommands and flags: - `./scripts/gh.sh label list` — fetch all available labels - `./scripts/gh.sh label list --limit 100` — fetch with limit - `./scripts/gh.sh issue view 123` — read issue title, body, and labels - `./scripts/gh.sh issue view 123 --comments` — read the conversation - `./scripts/gh.sh issue list --state open --limit 20` — list issues - `./scripts/gh.sh search issues "query"` — find similar or duplicate issues - `./scripts/gh.sh search issues "query" --limit 10` — search with limit - `./scripts/edit-issue-labels.sh --issue NUMBER --add-label LABEL --remove-label LABEL` — add or remove labels TASK: 1. Run `./scripts/gh.sh label list` to fetch the available labels. You may ONLY use labels from this list. Never invent new labels. 2. Run `./scripts/gh.sh issue view ISSUE_NUMBER` to read the issue details. 3. Run `./scripts/gh.sh issue view ISSUE_NUMBER --comments` to read the conversation. **If EVENT is "issues" (new issue):** 4. First, check if this issue is actually about Claude Code (the CLI/IDE tool). Issues about the Claude API, claude.ai, the Claude app, Anthropic billing, or other Anthropic products should be labeled `invalid`. If invalid, apply only that label and stop. 5. Analyze and apply category labels: - Type (bug, enhancement, question, etc.) - Technical areas and platform - Check for duplicates with `./scripts/gh.sh search issues`. 
Only mark as duplicate of OPEN issues. 6. Evaluate lifecycle labels: - `needs-repro` (bugs only, 7 days): Bug reports without clear steps to reproduce. A good repro has specific, followable steps that someone else could use to see the same issue. Do NOT apply if the user already provided error messages, logs, file paths, or a description of what they did. Don't require a specific format — narrative descriptions count. For model behavior issues (e.g. "Claude does X when it should do Y"), don't require traditional repro steps — examples and patterns are sufficient. - `needs-info` (bugs only, 7 days): The issue needs something from the community before it can progress — e.g. error messages, versions, environment details, or answers to follow-up questions. Don't apply to questions or enhancements. Do NOT apply if the user already provided version, environment, and error details. If the issue just needs engineering investigation, that's not `needs-info`. Issues with these labels are automatically closed after the timeout if there's no response. The goal is to avoid issues lingering without a clear next step. 7. Apply all selected labels: `./scripts/edit-issue-labels.sh --issue ISSUE_NUMBER --add-label "label1" --add-label "label2"` **If EVENT is "issue_comment" (comment on existing issue):** 4. Evaluate lifecycle labels based on the full conversation: - If the issue has `stale` or `autoclose`, remove the label — a new human comment means the issue is still active: `./scripts/edit-issue-labels.sh --issue ISSUE_NUMBER --remove-label "stale" --remove-label "autoclose"` - If the issue has `needs-repro` or `needs-info` and the missing information has now been provided, remove the label: `./scripts/edit-issue-labels.sh --issue ISSUE_NUMBER --remove-label "needs-repro"` - If the issue doesn't have lifecycle labels but clearly needs them (e.g., a maintainer asked for repro steps or more details), add the appropriate label. 
- Comments like "+1", "me too", "same here", or emoji reactions are NOT the missing information. Only remove `needs-repro` or `needs-info` when substantive details are actually provided. - Do NOT add or remove category labels (bug, enhancement, etc.) on comment events. GUIDELINES: - ONLY use labels from `./scripts/gh.sh label list` — never create or guess label names - DO NOT post any comments to the issue - Be conservative with lifecycle labels — only apply when clearly warranted - Only apply lifecycle labels (`needs-repro`, `needs-info`) to bugs — never to questions or enhancements - When in doubt, don't apply a lifecycle label — false positives are worse than missing labels - It's okay to not add any labels if none are clearly applicable ================================================ FILE: .claude-plugin/marketplace.json ================================================ { "$schema": "https://anthropic.com/claude-code/marketplace.schema.json", "name": "claude-code-plugins", "version": "1.0.0", "description": "Bundled plugins for Claude Code including Agent SDK development tools, PR review toolkit, and commit workflows", "owner": { "name": "Anthropic", "email": "support@anthropic.com" }, "plugins": [ { "name": "agent-sdk-dev", "description": "Development kit for working with the Claude Agent SDK", "source": "./plugins/agent-sdk-dev", "category": "development" }, { "name": "claude-opus-4-5-migration", "description": "Migrate your code and prompts from Sonnet 4.x and Opus 4.1 to Opus 4.5.", "version": "1.0.0", "author": { "name": "William Hu", "email": "whu@anthropic.com" }, "source": "./plugins/claude-opus-4-5-migration", "category": "development" }, { "name": "code-review", "description": "Automated code review for pull requests using multiple specialized agents with confidence-based scoring to filter false positives", "version": "1.0.0", "author": { "name": "Boris Cherny", "email": "boris@anthropic.com" }, "source": "./plugins/code-review", "category": 
"productivity" }, { "name": "commit-commands", "description": "Commands for git commit workflows including commit, push, and PR creation", "version": "1.0.0", "author": { "name": "Anthropic", "email": "support@anthropic.com" }, "source": "./plugins/commit-commands", "category": "productivity" }, { "name": "explanatory-output-style", "description": "Adds educational insights about implementation choices and codebase patterns (mimics the deprecated Explanatory output style)", "version": "1.0.0", "author": { "name": "Dickson Tsai", "email": "dickson@anthropic.com" }, "source": "./plugins/explanatory-output-style", "category": "learning" }, { "name": "feature-dev", "description": "Comprehensive feature development workflow with specialized agents for codebase exploration, architecture design, and quality review", "version": "1.0.0", "author": { "name": "Siddharth Bidasaria", "email": "sbidasaria@anthropic.com" }, "source": "./plugins/feature-dev", "category": "development" }, { "name": "frontend-design", "description": "Create distinctive, production-grade frontend interfaces with high design quality. Generates creative, polished code that avoids generic AI aesthetics.", "version": "1.0.0", "author": { "name": "Prithvi Rajasekaran & Alexander Bricken", "email": "prithvi@anthropic.com" }, "source": "./plugins/frontend-design", "category": "development" }, { "name": "hookify", "description": "Easily create custom hooks to prevent unwanted behaviors by analyzing conversation patterns or from explicit instructions. 
Define rules via simple markdown files.", "version": "0.1.0", "author": { "name": "Daisy Hollman", "email": "daisy@anthropic.com" }, "source": "./plugins/hookify", "category": "productivity" }, { "name": "learning-output-style", "description": "Interactive learning mode that requests meaningful code contributions at decision points (mimics the unshipped Learning output style)", "version": "1.0.0", "author": { "name": "Boris Cherny", "email": "boris@anthropic.com" }, "source": "./plugins/learning-output-style", "category": "learning" }, { "name": "plugin-dev", "description": "Comprehensive toolkit for developing Claude Code plugins. Includes 7 expert skills covering hooks, MCP integration, commands, agents, and best practices. AI-assisted plugin creation and validation.", "version": "0.1.0", "author": { "name": "Daisy Hollman", "email": "daisy@anthropic.com" }, "source": "./plugins/plugin-dev", "category": "development" }, { "name": "pr-review-toolkit", "description": "Comprehensive PR review agents specializing in comments, tests, error handling, type design, code quality, and code simplification", "version": "1.0.0", "author": { "name": "Anthropic", "email": "support@anthropic.com" }, "source": "./plugins/pr-review-toolkit", "category": "productivity" }, { "name": "ralph-wiggum", "description": "Interactive self-referential AI loops for iterative development. 
Claude works on the same task repeatedly, seeing its previous work, until completion.", "version": "1.0.0", "author": { "name": "Daisy Hollman", "email": "daisy@anthropic.com" }, "source": "./plugins/ralph-wiggum", "category": "development" }, { "name": "security-guidance", "description": "Security reminder hook that warns about potential security issues when editing files, including command injection, XSS, and unsafe code patterns", "version": "1.0.0", "author": { "name": "David Dworken", "email": "dworken@anthropic.com" }, "source": "./plugins/security-guidance", "category": "security" } ] } ================================================ FILE: .devcontainer/Dockerfile ================================================ FROM node:20 ARG TZ ENV TZ="$TZ" ARG CLAUDE_CODE_VERSION=latest # Install basic development tools and iptables/ipset RUN apt-get update && apt-get install -y --no-install-recommends \ less \ git \ procps \ sudo \ fzf \ zsh \ man-db \ unzip \ gnupg2 \ gh \ iptables \ ipset \ iproute2 \ dnsutils \ aggregate \ jq \ nano \ vim \ && apt-get clean && rm -rf /var/lib/apt/lists/* # Ensure default node user has access to /usr/local/share RUN mkdir -p /usr/local/share/npm-global && \ chown -R node:node /usr/local/share ARG USERNAME=node # Persist bash history. 
RUN SNIPPET="export PROMPT_COMMAND='history -a' && export HISTFILE=/commandhistory/.bash_history" \ && mkdir /commandhistory \ && touch /commandhistory/.bash_history \ && chown -R $USERNAME /commandhistory # Set `DEVCONTAINER` environment variable to help with orientation ENV DEVCONTAINER=true # Create workspace and config directories and set permissions RUN mkdir -p /workspace /home/node/.claude && \ chown -R node:node /workspace /home/node/.claude WORKDIR /workspace ARG GIT_DELTA_VERSION=0.18.2 RUN ARCH=$(dpkg --print-architecture) && \ wget "https://github.com/dandavison/delta/releases/download/${GIT_DELTA_VERSION}/git-delta_${GIT_DELTA_VERSION}_${ARCH}.deb" && \ sudo dpkg -i "git-delta_${GIT_DELTA_VERSION}_${ARCH}.deb" && \ rm "git-delta_${GIT_DELTA_VERSION}_${ARCH}.deb" # Set up non-root user USER node # Install global packages ENV NPM_CONFIG_PREFIX=/usr/local/share/npm-global ENV PATH=$PATH:/usr/local/share/npm-global/bin # Set the default shell to zsh rather than sh ENV SHELL=/bin/zsh # Set the default editor and visual ENV EDITOR=nano ENV VISUAL=nano # Default powerline10k theme ARG ZSH_IN_DOCKER_VERSION=1.2.0 RUN sh -c "$(wget -O- https://github.com/deluan/zsh-in-docker/releases/download/v${ZSH_IN_DOCKER_VERSION}/zsh-in-docker.sh)" -- \ -p git \ -p fzf \ -a "source /usr/share/doc/fzf/examples/key-bindings.zsh" \ -a "source /usr/share/doc/fzf/examples/completion.zsh" \ -a "export PROMPT_COMMAND='history -a' && export HISTFILE=/commandhistory/.bash_history" \ -x # Install Claude RUN npm install -g @anthropic-ai/claude-code@${CLAUDE_CODE_VERSION} # Copy and set up firewall script COPY init-firewall.sh /usr/local/bin/ USER root RUN chmod +x /usr/local/bin/init-firewall.sh && \ echo "node ALL=(root) NOPASSWD: /usr/local/bin/init-firewall.sh" > /etc/sudoers.d/node-firewall && \ chmod 0440 /etc/sudoers.d/node-firewall USER node ================================================ FILE: .devcontainer/devcontainer.json ================================================ { 
"name": "Claude Code Sandbox", "build": { "dockerfile": "Dockerfile", "args": { "TZ": "${localEnv:TZ:America/Los_Angeles}", "CLAUDE_CODE_VERSION": "latest", "GIT_DELTA_VERSION": "0.18.2", "ZSH_IN_DOCKER_VERSION": "1.2.0" } }, "runArgs": [ "--cap-add=NET_ADMIN", "--cap-add=NET_RAW" ], "customizations": { "vscode": { "extensions": [ "anthropic.claude-code", "dbaeumer.vscode-eslint", "esbenp.prettier-vscode", "eamodio.gitlens" ], "settings": { "editor.formatOnSave": true, "editor.defaultFormatter": "esbenp.prettier-vscode", "editor.codeActionsOnSave": { "source.fixAll.eslint": "explicit" }, "terminal.integrated.defaultProfile.linux": "zsh", "terminal.integrated.profiles.linux": { "bash": { "path": "bash", "icon": "terminal-bash" }, "zsh": { "path": "zsh" } } } } }, "remoteUser": "node", "mounts": [ "source=claude-code-bashhistory-${devcontainerId},target=/commandhistory,type=volume", "source=claude-code-config-${devcontainerId},target=/home/node/.claude,type=volume" ], "containerEnv": { "NODE_OPTIONS": "--max-old-space-size=4096", "CLAUDE_CONFIG_DIR": "/home/node/.claude", "POWERLEVEL9K_DISABLE_GITSTATUS": "true" }, "workspaceMount": "source=${localWorkspaceFolder},target=/workspace,type=bind,consistency=delegated", "workspaceFolder": "/workspace", "postStartCommand": "sudo /usr/local/bin/init-firewall.sh", "waitFor": "postStartCommand" } ================================================ FILE: .devcontainer/init-firewall.sh ================================================ #!/bin/bash set -euo pipefail # Exit on error, undefined vars, and pipeline failures IFS=$'\n\t' # Stricter word splitting # 1. Extract Docker DNS info BEFORE any flushing DOCKER_DNS_RULES=$(iptables-save -t nat | grep "127\.0\.0\.11" || true) # Flush existing rules and delete existing ipsets iptables -F iptables -X iptables -t nat -F iptables -t nat -X iptables -t mangle -F iptables -t mangle -X ipset destroy allowed-domains 2>/dev/null || true # 2. 
Selectively restore ONLY internal Docker DNS resolution if [ -n "$DOCKER_DNS_RULES" ]; then echo "Restoring Docker DNS rules..." iptables -t nat -N DOCKER_OUTPUT 2>/dev/null || true iptables -t nat -N DOCKER_POSTROUTING 2>/dev/null || true echo "$DOCKER_DNS_RULES" | xargs -L 1 iptables -t nat else echo "No Docker DNS rules to restore" fi # First allow DNS and localhost before any restrictions # Allow outbound DNS iptables -A OUTPUT -p udp --dport 53 -j ACCEPT # Allow inbound DNS responses iptables -A INPUT -p udp --sport 53 -j ACCEPT # Allow outbound SSH iptables -A OUTPUT -p tcp --dport 22 -j ACCEPT # Allow inbound SSH responses iptables -A INPUT -p tcp --sport 22 -m state --state ESTABLISHED -j ACCEPT # Allow localhost iptables -A INPUT -i lo -j ACCEPT iptables -A OUTPUT -o lo -j ACCEPT # Create ipset with CIDR support ipset create allowed-domains hash:net # Fetch GitHub meta information and aggregate + add their IP ranges echo "Fetching GitHub IP ranges..." gh_ranges=$(curl -s https://api.github.com/meta) if [ -z "$gh_ranges" ]; then echo "ERROR: Failed to fetch GitHub IP ranges" exit 1 fi if ! echo "$gh_ranges" | jq -e '.web and .api and .git' >/dev/null; then echo "ERROR: GitHub API response missing required fields" exit 1 fi echo "Processing GitHub IPs..." while read -r cidr; do if [[ ! "$cidr" =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}/[0-9]{1,2}$ ]]; then echo "ERROR: Invalid CIDR range from GitHub meta: $cidr" exit 1 fi echo "Adding GitHub range $cidr" ipset add allowed-domains "$cidr" done < <(echo "$gh_ranges" | jq -r '(.web + .api + .git)[]' | aggregate -q) # Resolve and add other allowed domains for domain in \ "registry.npmjs.org" \ "api.anthropic.com" \ "sentry.io" \ "statsig.anthropic.com" \ "statsig.com" \ "marketplace.visualstudio.com" \ "vscode.blob.core.windows.net" \ "update.code.visualstudio.com"; do echo "Resolving $domain..." 
ips=$(dig +noall +answer A "$domain" | awk '$4 == "A" {print $5}') if [ -z "$ips" ]; then echo "ERROR: Failed to resolve $domain" exit 1 fi while read -r ip; do if [[ ! "$ip" =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then echo "ERROR: Invalid IP from DNS for $domain: $ip" exit 1 fi echo "Adding $ip for $domain" ipset add allowed-domains "$ip" done < <(echo "$ips") done # Get host IP from default route HOST_IP=$(ip route | grep default | cut -d" " -f3) if [ -z "$HOST_IP" ]; then echo "ERROR: Failed to detect host IP" exit 1 fi HOST_NETWORK=$(echo "$HOST_IP" | sed "s/\.[0-9]*$/.0\/24/") echo "Host network detected as: $HOST_NETWORK" # Set up remaining iptables rules iptables -A INPUT -s "$HOST_NETWORK" -j ACCEPT iptables -A OUTPUT -d "$HOST_NETWORK" -j ACCEPT # Set default policies to DROP first iptables -P INPUT DROP iptables -P FORWARD DROP iptables -P OUTPUT DROP # First allow established connections for already approved traffic iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT iptables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT # Then allow only specific outbound traffic to allowed domains iptables -A OUTPUT -m set --match-set allowed-domains dst -j ACCEPT # Explicitly REJECT all other outbound traffic for immediate feedback iptables -A OUTPUT -j REJECT --reject-with icmp-admin-prohibited echo "Firewall configuration complete" echo "Verifying firewall rules..." if curl --connect-timeout 5 https://example.com >/dev/null 2>&1; then echo "ERROR: Firewall verification failed - was able to reach https://example.com" exit 1 else echo "Firewall verification passed - unable to reach https://example.com as expected" fi # Verify GitHub API access if ! 
curl --connect-timeout 5 https://api.github.com/zen >/dev/null 2>&1; then echo "ERROR: Firewall verification failed - unable to reach https://api.github.com" exit 1 else echo "Firewall verification passed - able to reach https://api.github.com as expected" fi ================================================ FILE: .gitattributes ================================================ * text=auto eol=lf *.sh text eol=lf ================================================ FILE: .github/ISSUE_TEMPLATE/bug_report.yml ================================================ name: 🐛 Bug Report description: Report a bug or unexpected behavior in Claude Code title: "[BUG] " labels: - bug body: - type: markdown attributes: value: | Thanks for taking the time to report this bug! Please fill out the sections below to help us understand and fix the issue. Before submitting, please check: - You're using the [latest version](https://www.npmjs.com/package/@anthropic-ai/claude-code?activeTab=versions) of Claude Code (`claude --version`) - This issue hasn't already been reported by searching [existing issues](https://github.com/anthropics/claude-code/issues?q=is%3Aissue%20state%3Aopen%20label%3Abug). - This is a bug, not a feature request or support question - type: checkboxes id: preflight attributes: label: Preflight Checklist description: Please confirm before submitting options: - label: I have searched [existing issues](https://github.com/anthropics/claude-code/issues?q=is%3Aissue%20state%3Aopen%20label%3Abug) and this hasn't been reported yet required: true - label: This is a single bug report (please file separate reports for different bugs) required: true - label: I am using the latest version of Claude Code required: true - type: textarea id: actual attributes: label: What's Wrong? description: Describe what's happening that shouldn't be placeholder: | When I try to create a Python file, Claude shows an error "EACCES: permission denied" and the file isn't created. 
The command fails immediately after accepting the file write permission... validations: required: true - type: textarea id: expected attributes: label: What Should Happen? description: Describe the expected behavior placeholder: Claude should create a Python script file successfully without errors validations: required: true - type: textarea id: error_output attributes: label: Error Messages/Logs description: If you see any error messages, paste them here placeholder: | Paste any error output, stack traces, or relevant logs here. This will be automatically formatted as code. render: shell validations: required: false - type: textarea id: reproduction attributes: label: Steps to Reproduce description: | Please provide clear, numbered steps that anyone can follow to reproduce the issue. **Important**: Include any necessary code, file contents, or context needed to reproduce the bug. If the issue involves specific files or code, please create a minimal example. placeholder: | 1. Create a file `test.py` with this content: ```python def hello(): print("test") ``` 2. Run `claude "add type hints to test.py"` 3. When prompted for file access, accept 4. Error appears: "Unable to parse..." Note: The bug only happens with Python files containing... validations: required: true - type: dropdown id: model attributes: label: Claude Model description: Which model were you using? (Run `/model` to check) options: - Sonnet (default) - Opus - Not sure / Multiple models - Other validations: required: false - type: dropdown id: regression attributes: label: Is this a regression? description: Did this work in a previous version? options: - "Yes, this worked in a previous version" - "No, this never worked" - "I don't know" validations: required: true - type: input id: working_version attributes: label: Last Working Version description: If this is a regression, which version last worked? This helps expedite a fix. 
placeholder: "e.g., 1.0.100" validations: required: false - type: input id: version attributes: label: Claude Code Version description: Run `claude --version` and paste the output placeholder: "e.g., 1.0.123 (Claude Code)" validations: required: true - type: dropdown id: platform attributes: label: Platform description: Which API platform are you using? options: - Anthropic API - AWS Bedrock - Google Vertex AI - Other validations: required: true - type: dropdown id: os attributes: label: Operating System options: - macOS - Windows - Ubuntu/Debian Linux - Other Linux - Other validations: required: true - type: dropdown id: terminal attributes: label: Terminal/Shell description: Which terminal are you using? options: - Terminal.app (macOS) - Warp - Cursor - iTerm2 - IntelliJ IDEA terminal - VS Code integrated terminal - PyCharm terminal - Windows Terminal - PowerShell - WSL (Windows Subsystem for Linux) - Xterm - Non-interactive/CI environment - Other validations: required: true - type: textarea id: additional attributes: label: Additional Information description: | Anything else that might help us understand the issue? - Screenshots (drag and drop images here) - Configuration files - Related files or code - Links to repositories demonstrating the issue placeholder: Any additional context, screenshots, or information... validations: required: false ================================================ FILE: .github/ISSUE_TEMPLATE/config.yml ================================================ blank_issues_enabled: false contact_links: - name: 💬 Discord Community url: https://anthropic.com/discord about: Get help, ask questions, and chat with other Claude Code users - name: 📖 Documentation url: https://docs.claude.com/en/docs/claude-code about: Read the official documentation and guides - name: 🎓 Getting Started Guide url: https://docs.claude.com/en/docs/claude-code/quickstart about: New to Claude Code? 
Start here - name: 🔧 Troubleshooting Guide url: https://docs.claude.com/en/docs/claude-code/troubleshooting about: Common issues and how to fix them ================================================ FILE: .github/ISSUE_TEMPLATE/documentation.yml ================================================ name: 📚 Documentation Issue description: Report missing, unclear, or incorrect documentation title: "[DOCS] " labels: - documentation body: - type: markdown attributes: value: | ## Help us improve our documentation! Good documentation is crucial for a great developer experience. Please let us know what's missing or confusing. - type: dropdown id: doc_type attributes: label: Documentation Type description: What kind of documentation issue is this? options: - Missing documentation (feature not documented) - Unclear/confusing documentation - Incorrect/outdated documentation - Typo or formatting issue - Missing code examples - Broken links - Other validations: required: true - type: input id: location attributes: label: Documentation Location description: Where did you encounter this issue? Provide a URL if possible placeholder: "e.g., https://docs.anthropic.com/en/docs/claude-code/getting-started" validations: required: false - type: input id: section attributes: label: Section/Topic description: Which specific section or topic needs improvement? placeholder: "e.g., MCP Server Configuration section" validations: required: true - type: textarea id: current attributes: label: Current Documentation description: | What does the documentation currently say? Quote the specific text if applicable. placeholder: | The docs currently say: "To configure MCP servers, add them to your configuration..." But it doesn't explain... validations: required: false - type: textarea id: issue attributes: label: What's Wrong or Missing? 
description: Explain what's incorrect, unclear, or missing placeholder: | The documentation doesn't explain how to: - Configure multiple MCP servers - Handle authentication - Debug connection issues The example code doesn't work because... validations: required: true - type: textarea id: suggested attributes: label: Suggested Improvement description: How should the documentation be improved? Provide suggested text if possible placeholder: | The documentation should include: 1. A complete example showing... 2. Explanation of common errors like... 3. Step-by-step guide for... Suggested text: "To configure multiple MCP servers, create an array in your settings..." validations: required: true - type: dropdown id: impact attributes: label: Impact description: How much does this documentation issue affect users? options: - High - Prevents users from using a feature - Medium - Makes feature difficult to understand - Low - Minor confusion or inconvenience validations: required: true - type: textarea id: additional attributes: label: Additional Context description: | - Screenshots showing the issue - Links to related documentation - Examples from other projects that do this well placeholder: Any additional information that would help... validations: required: false ================================================ FILE: .github/ISSUE_TEMPLATE/feature_request.yml ================================================ name: ✨ Feature Request description: Suggest a new feature or enhancement for Claude Code title: "[FEATURE] " labels: - enhancement body: - type: markdown attributes: value: | ## Thanks for suggesting a feature! We love hearing ideas from our community. Please help us understand your use case by filling out the sections below. Before submitting, please check if this feature has already been requested. 
- type: checkboxes id: preflight attributes: label: Preflight Checklist options: - label: I have searched [existing requests](https://github.com/anthropics/claude-code/issues?q=is%3Aissue%20label%3Aenhancement) and this feature hasn't been requested yet required: true - label: This is a single feature request (not multiple features) required: true - type: textarea id: problem attributes: label: Problem Statement description: | What problem are you trying to solve? Why do you need this feature? Focus on the problem, not the solution. Help us understand your workflow. placeholder: | I often need to work with multiple projects simultaneously, but Claude Code doesn't support... When I'm debugging code, I find it difficult to... The current workflow requires me to manually... validations: required: true - type: textarea id: solution attributes: label: Proposed Solution description: | How would you like this to work? Describe the ideal user experience. Be specific about how you'd interact with this feature. placeholder: | I'd like to be able to run `claude --workspace project1,project2` to... There should be a command or setting that allows... The interface should show... validations: required: true - type: textarea id: alternatives attributes: label: Alternative Solutions description: | What alternatives have you considered or tried? Are there workarounds you're currently using? placeholder: | I've tried using multiple terminal windows but... Currently I work around this by... Other tools solve this by... validations: required: false - type: dropdown id: priority attributes: label: Priority description: How important is this feature to your workflow? options: - Critical - Blocking my work - High - Significant impact on productivity - Medium - Would be very helpful - Low - Nice to have validations: required: true - type: dropdown id: category attributes: label: Feature Category description: What area does this feature relate to? 
options: - CLI commands and flags - Interactive mode (TUI) - File operations - API and model interactions - MCP server integration - Performance and speed - Configuration and settings - Developer tools/SDK - Documentation - Other validations: required: true - type: textarea id: use_case attributes: label: Use Case Example description: | Provide a concrete, real-world example of when you'd use this feature. Walk us through a scenario step-by-step. placeholder: | Example scenario: 1. I'm working on a React app with a Node.js backend 2. I need to make changes to both frontend and backend 3. With this feature, I could... 4. This would save me time because... validations: required: false - type: textarea id: additional attributes: label: Additional Context description: | - Screenshots or mockups of the proposed feature - Links to similar features in other tools - Technical considerations or constraints - Any other relevant information placeholder: Add any other context, mockups, or examples here... validations: required: false ================================================ FILE: .github/ISSUE_TEMPLATE/model_behavior.yml ================================================ name: 🤖 Model Behavior Issue description: Report unexpected Claude model behavior, incorrect actions, or permission violations title: "[MODEL] " labels: - model body: - type: markdown attributes: value: | ## Report Unexpected Model Behavior Use this template when Claude does something unexpected, makes unwanted changes, or behaves inconsistently with your instructions. 
**This is for:** Unexpected actions, file modifications outside scope, ignoring instructions, making assumptions **NOT for:** Crashes, API errors, or installation issues (use Bug Report instead) - type: checkboxes id: preflight attributes: label: Preflight Checklist description: Please confirm before submitting options: - label: I have searched [existing issues](https://github.com/anthropics/claude-code/issues?q=is%3Aissue%20state%3Aopen%20label%3Amodel) for similar behavior reports required: true - label: This report does NOT contain sensitive information (API keys, passwords, etc.) required: true - type: dropdown id: behavior_type attributes: label: Type of Behavior Issue description: What category best describes the unexpected behavior? options: - Claude modified files I didn't ask it to modify - Claude accessed files outside the working directory - Claude ignored my instructions or configuration - Claude reverted/undid previous changes without asking - Claude made incorrect assumptions about my project - Claude refused a reasonable request - Claude's behavior changed between sessions - Subagent behaved unexpectedly - Other unexpected behavior validations: required: true - type: textarea id: what_you_asked attributes: label: What You Asked Claude to Do description: Provide the exact prompt or command you gave placeholder: | I asked: "Update the README.md file to add installation instructions" Or I ran: `claude "fix the bug in auth.js"` validations: required: true - type: textarea id: what_claude_did attributes: label: What Claude Actually Did description: Describe step-by-step what Claude did instead placeholder: | 1. Claude read README.md 2. Instead of updating it, Claude deleted the entire file 3. Created a new README from scratch with different content 4. Also modified package.json without being asked 5. 
Changed .gitignore file validations: required: true - type: textarea id: expected_behavior attributes: label: Expected Behavior description: What should Claude have done? placeholder: | Claude should have: 1. Read the existing README.md 2. Added an "Installation" section 3. Only modified that single file 4. Not touched any other files validations: required: true - type: textarea id: files_affected attributes: label: Files Affected description: | List all files that were accessed or modified (even if you didn't expect them to be) placeholder: | Modified: - README.md (deleted and recreated) - package.json (version bumped - not requested) - .gitignore (added entries - not requested) Read (unexpectedly): - /Users/me/.ssh/config - ../../../parent-directory/secrets.env render: shell validations: required: false - type: dropdown id: permission_mode attributes: label: Permission Mode description: What permission settings were active? options: - Accept Edits was ON (auto-accepting changes) - Accept Edits was OFF (manual approval required) - I don't know / Not sure validations: required: true - type: dropdown id: reproducible attributes: label: Can You Reproduce This? description: Does this happen consistently? options: - Yes, every time with the same prompt - Sometimes (intermittent) - No, only happened once - Haven't tried to reproduce validations: required: true - type: textarea id: reproduction_steps attributes: label: Steps to Reproduce description: If reproducible, provide minimal steps placeholder: | 1. Create a new directory with a simple README.md 2. Ask Claude Code to "improve the README" 3. Claude will delete and recreate the file instead of editing validations: required: false - type: dropdown id: model attributes: label: Claude Model description: Which model were you using? 
(Run `/model` to check) options: - Sonnet - Opus - Haiku - Not sure - Other validations: required: true - type: textarea id: conversation_log attributes: label: Relevant Conversation description: | Include relevant parts of Claude's responses, especially where it explains what it's doing placeholder: | Claude said: "I'll help you update the README. Let me first delete the old one and create a fresh version..." [Then proceeded to delete without asking for confirmation] render: markdown validations: required: false - type: dropdown id: impact attributes: label: Impact description: How severe was the impact of this behavior? options: - Critical - Data loss or corrupted project - High - Significant unwanted changes - Medium - Extra work to undo changes - Low - Minor inconvenience validations: required: true - type: input id: version attributes: label: Claude Code Version description: Run `claude --version` and paste the output placeholder: "e.g., 1.0.123 (Claude Code)" validations: required: true - type: dropdown id: platform attributes: label: Platform description: Which API platform are you using? 
options: - Anthropic API - AWS Bedrock - Google Vertex AI - Other validations: required: true - type: textarea id: additional attributes: label: Additional Context description: | - Any patterns you've noticed - Similar behavior in other sessions - Specific file types or project structures that trigger this - Screenshots if relevant placeholder: | This seems to happen more often with: - Python projects - When there are multiple similar files - After long conversations validations: required: false ================================================ FILE: .github/workflows/auto-close-duplicates.yml ================================================ name: Auto-close duplicate issues description: Auto-closes issues that are duplicates of existing issues on: schedule: - cron: "0 9 * * *" workflow_dispatch: jobs: auto-close-duplicates: runs-on: ubuntu-latest timeout-minutes: 10 permissions: contents: read issues: write steps: - name: Checkout repository uses: actions/checkout@v4 - name: Setup Bun uses: oven-sh/setup-bun@v2 with: bun-version: latest - name: Auto-close duplicate issues run: bun run scripts/auto-close-duplicates.ts env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }} GITHUB_REPOSITORY_NAME: ${{ github.event.repository.name }} STATSIG_API_KEY: ${{ secrets.STATSIG_API_KEY }} ================================================ FILE: .github/workflows/backfill-duplicate-comments.yml ================================================ name: Backfill Duplicate Comments description: Triggers duplicate detection for old issues that don't have duplicate comments on: workflow_dispatch: inputs: days_back: description: 'How many days back to look for old issues' required: false default: '90' type: string dry_run: description: 'Dry run mode (true to only log what would be done)' required: false default: 'true' type: choice options: - 'true' - 'false' jobs: backfill-duplicate-comments: runs-on: ubuntu-latest timeout-minutes: 30 
permissions: contents: read issues: read actions: write steps: - name: Checkout repository uses: actions/checkout@v4 - name: Setup Bun uses: oven-sh/setup-bun@v2 with: bun-version: latest - name: Backfill duplicate comments run: bun run scripts/backfill-duplicate-comments.ts env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} DAYS_BACK: ${{ inputs.days_back }} DRY_RUN: ${{ inputs.dry_run }} ================================================ FILE: .github/workflows/claude-dedupe-issues.yml ================================================ name: Claude Issue Dedupe description: Automatically dedupe GitHub issues using Claude Code on: issues: types: [opened] workflow_dispatch: inputs: issue_number: description: 'Issue number to process for duplicate detection' required: true type: string jobs: claude-dedupe-issues: runs-on: ubuntu-latest timeout-minutes: 10 permissions: contents: read issues: write steps: - name: Checkout repository uses: actions/checkout@v4 - name: Run Claude Code slash command uses: anthropics/claude-code-action@v1 env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: github_token: ${{ secrets.GITHUB_TOKEN }} allowed_non_write_users: "*" prompt: "/dedupe ${{ github.repository }}/issues/${{ github.event.issue.number || inputs.issue_number }}" anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} claude_args: "--model claude-sonnet-4-5-20250929" - name: Log duplicate comment event to Statsig if: always() env: STATSIG_API_KEY: ${{ secrets.STATSIG_API_KEY }} run: | ISSUE_NUMBER=${{ github.event.issue.number || inputs.issue_number }} REPO=${{ github.repository }} if [ -z "$STATSIG_API_KEY" ]; then echo "STATSIG_API_KEY not found, skipping Statsig logging" exit 0 fi # Prepare the event payload EVENT_PAYLOAD=$(jq -n \ --arg issue_number "$ISSUE_NUMBER" \ --arg repo "$REPO" \ --arg triggered_by "${{ github.event_name }}" \ '{ events: [{ eventName: "github_duplicate_comment_added", value: 1, metadata: { repository: $repo, issue_number: ($issue_number | tonumber), 
triggered_by: $triggered_by, workflow_run_id: "${{ github.run_id }}" }, time: (now | floor | tostring) }] }') # Send to Statsig API echo "Logging duplicate comment event to Statsig for issue #${ISSUE_NUMBER}" RESPONSE=$(curl -s -w "\n%{http_code}" -X POST https://events.statsigapi.net/v1/log_event \ -H "Content-Type: application/json" \ -H "STATSIG-API-KEY: ${STATSIG_API_KEY}" \ -d "$EVENT_PAYLOAD") HTTP_CODE=$(echo "$RESPONSE" | tail -n1) BODY=$(echo "$RESPONSE" | head -n-1) if [ "$HTTP_CODE" -eq 200 ] || [ "$HTTP_CODE" -eq 202 ]; then echo "Successfully logged duplicate comment event for issue #${ISSUE_NUMBER}" else echo "Failed to log duplicate comment event for issue #${ISSUE_NUMBER}. HTTP ${HTTP_CODE}: ${BODY}" fi ================================================ FILE: .github/workflows/claude-issue-triage.yml ================================================ name: Claude Issue Triage on: issues: types: [opened] issue_comment: types: [created] jobs: triage-issue: runs-on: ubuntu-latest timeout-minutes: 10 if: >- github.event_name == 'issues' || (github.event_name == 'issue_comment' && !github.event.issue.pull_request && github.event.comment.user.type != 'Bot') concurrency: group: issue-triage-${{ github.event.issue.number }} cancel-in-progress: true permissions: contents: read issues: write steps: - name: Checkout repository uses: actions/checkout@v4 - name: Run Claude Code for Issue Triage timeout-minutes: 5 uses: anthropics/claude-code-action@v1 env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} GH_REPO: ${{ github.repository }} with: github_token: ${{ secrets.GITHUB_TOKEN }} allowed_non_write_users: "*" prompt: "/triage-issue REPO: ${{ github.repository }} ISSUE_NUMBER: ${{ github.event.issue.number }} EVENT: ${{ github.event_name }}" anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} claude_args: | --model claude-opus-4-6 ================================================ FILE: .github/workflows/claude.yml ================================================ name: Claude 
Code on: issue_comment: types: [created] pull_request_review_comment: types: [created] issues: types: [opened, assigned] pull_request_review: types: [submitted] jobs: claude: if: | (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) || (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) || (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) || (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude'))) runs-on: ubuntu-latest permissions: contents: read pull-requests: read issues: read id-token: write steps: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: fetch-depth: 1 - name: Run Claude Code id: claude uses: anthropics/claude-code-action@v1 with: anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} claude_args: "--model claude-sonnet-4-5-20250929" ================================================ FILE: .github/workflows/issue-lifecycle-comment.yml ================================================ name: "Issue Lifecycle Comment" on: issues: types: [labeled] permissions: issues: write jobs: comment: runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 - name: Setup Bun uses: oven-sh/setup-bun@v2 with: bun-version: latest - name: Post lifecycle comment run: bun run scripts/lifecycle-comment.ts env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} LABEL: ${{ github.event.label.name }} ISSUE_NUMBER: ${{ github.event.issue.number }} ================================================ FILE: .github/workflows/issue-opened-dispatch.yml ================================================ name: Issue Opened Dispatch on: issues: types: [opened] permissions: issues: read actions: write jobs: notify: runs-on: ubuntu-latest timeout-minutes: 1 steps: - name: Process new issue env: ISSUE_URL: ${{ 
github.event.issue.html_url }} ISSUE_NUMBER: ${{ github.event.issue.number }} ISSUE_TITLE: ${{ github.event.issue.title }} TARGET_REPO: ${{ secrets.ISSUE_OPENED_DISPATCH_TARGET_REPO }} GH_TOKEN: ${{ secrets.ISSUE_OPENED_DISPATCH_TOKEN }} run: | gh api repos/${TARGET_REPO}/dispatches \ -f event_type=issue_opened \ -f client_payload[issue_url]="${ISSUE_URL}" || { exit 0 } ================================================ FILE: .github/workflows/lock-closed-issues.yml ================================================ name: "Lock Stale Issues" on: schedule: # 8am Pacific = 1pm UTC (2pm UTC during DST) - cron: "0 14 * * *" workflow_dispatch: permissions: issues: write concurrency: group: lock-threads jobs: lock-closed-issues: runs-on: ubuntu-latest steps: - name: Lock closed issues after 7 days of inactivity uses: actions/github-script@v7 with: script: | const sevenDaysAgo = new Date(); sevenDaysAgo.setDate(sevenDaysAgo.getDate() - 7); const lockComment = `This issue has been automatically locked since it was closed and has not had any activity for 7 days. 
If you're experiencing a similar issue, please file a new issue and reference this one if it's relevant.`; let page = 1; let hasMore = true; let totalLocked = 0; while (hasMore) { // Get closed issues (pagination) const { data: issues } = await github.rest.issues.listForRepo({ owner: context.repo.owner, repo: context.repo.repo, state: 'closed', sort: 'updated', direction: 'asc', per_page: 100, page: page }); if (issues.length === 0) { hasMore = false; break; } for (const issue of issues) { // Skip if already locked if (issue.locked) continue; // Skip pull requests if (issue.pull_request) continue; // Check if updated more than 7 days ago const updatedAt = new Date(issue.updated_at); if (updatedAt > sevenDaysAgo) { // Since issues are sorted by updated_at ascending, // once we hit a recent issue, all remaining will be recent too hasMore = false; break; } try { // Add comment before locking await github.rest.issues.createComment({ owner: context.repo.owner, repo: context.repo.repo, issue_number: issue.number, body: lockComment }); // Lock the issue await github.rest.issues.lock({ owner: context.repo.owner, repo: context.repo.repo, issue_number: issue.number, lock_reason: 'resolved' }); totalLocked++; console.log(`Locked issue #${issue.number}: ${issue.title}`); } catch (error) { console.error(`Failed to lock issue #${issue.number}: ${error.message}`); } } page++; } console.log(`Total issues locked: ${totalLocked}`); ================================================ FILE: .github/workflows/log-issue-events.yml ================================================ name: Log Issue Events to Statsig on: issues: types: [opened, closed] jobs: log-to-statsig: runs-on: ubuntu-latest permissions: issues: read steps: - name: Log issue creation to Statsig env: STATSIG_API_KEY: ${{ secrets.STATSIG_API_KEY }} ISSUE_NUMBER: ${{ github.event.issue.number }} REPO: ${{ github.repository }} ISSUE_TITLE: ${{ github.event.issue.title }} AUTHOR: ${{ github.event.issue.user.login }} CREATED_AT: 
${{ github.event.issue.created_at }} run: | # All values are now safely passed via environment variables # No direct templating in the shell script to prevent injection attacks curl -X POST "https://events.statsigapi.net/v1/log_event" \ -H "Content-Type: application/json" \ -H "statsig-api-key: $STATSIG_API_KEY" \ -d '{ "events": [{ "eventName": "github_issue_created", "metadata": { "issue_number": "'"$ISSUE_NUMBER"'", "repository": "'"$REPO"'", "title": "'"$(echo "$ISSUE_TITLE" | sed "s/\"/\\\\\"/g")"'", "author": "'"$AUTHOR"'", "created_at": "'"$CREATED_AT"'" }, "time": '"$(date +%s)000"' }] }' ================================================ FILE: .github/workflows/non-write-users-check.yml ================================================ name: Non-write Users Check on: pull_request: paths: - ".github/**" permissions: contents: read pull-requests: write jobs: allowed-non-write-check: runs-on: ubuntu-latest env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - run: | DIFF=$(gh pr diff "$PR_NUMBER" -R "$REPO" || true) if ! echo "$DIFF" | grep -qE '^diff --git a/\.github/.*\.ya?ml'; then exit 0 fi MATCHES=$(echo "$DIFF" | grep "^+.*allowed_non_write_users" || true) if [ -z "$MATCHES" ]; then exit 0 fi EXISTING=$(gh pr view "$PR_NUMBER" -R "$REPO" --json comments --jq '.comments[].body' \ | grep -c "" || true) if [ "$EXISTING" -gt 0 ]; then exit 0 fi gh pr comment "$PR_NUMBER" -R "$REPO" --body ' **`allowed_non_write_users` detected** This PR adds or modifies `allowed_non_write_users`, which allows users without write access to trigger Claude Code Action workflows. This can introduce security risks. If this is a new flow, please make sure you actually need `allowed_non_write_users`. If you are editing an existing workflow, double check that you are not adding new Claude permissions which might lead to a vulnerability. See existing workflows in this repo for safe usage examples, or contact the AppSec team.' 
env: PR_NUMBER: ${{ github.event.pull_request.number }} REPO: ${{ github.repository }} ================================================ FILE: .github/workflows/remove-autoclose-label.yml ================================================ name: "Remove Autoclose Label on Activity" on: issue_comment: types: [created] permissions: issues: write jobs: remove-autoclose: # Only run if the issue has the autoclose label if: | github.event.issue.state == 'open' && contains(github.event.issue.labels.*.name, 'autoclose') && github.event.comment.user.login != 'github-actions[bot]' runs-on: ubuntu-latest steps: - name: Remove autoclose label uses: actions/github-script@v7 with: script: | console.log(`Removing autoclose label from issue #${context.issue.number} due to new comment from ${context.payload.comment.user.login}`); try { // Remove the autoclose label await github.rest.issues.removeLabel({ owner: context.repo.owner, repo: context.repo.repo, issue_number: context.issue.number, name: 'autoclose' }); console.log(`Successfully removed autoclose label from issue #${context.issue.number}`); } catch (error) { // If the label was already removed or doesn't exist, that's fine if (error.status === 404) { console.log(`Autoclose label was already removed from issue #${context.issue.number}`); } else { throw error; } } ================================================ FILE: .github/workflows/sweep.yml ================================================ name: "Issue Sweep" on: schedule: - cron: "0 10,22 * * *" workflow_dispatch: permissions: issues: write concurrency: group: daily-issue-sweep jobs: sweep: runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 - name: Setup Bun uses: oven-sh/setup-bun@v2 with: bun-version: latest - name: Enforce lifecycle timeouts run: bun run scripts/sweep.ts env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }} GITHUB_REPOSITORY_NAME: ${{ github.event.repository.name }} 
================================================ FILE: .gitignore ================================================ .DS_Store ================================================ FILE: .vscode/extensions.json ================================================ { "recommendations": [ "dbaeumer.vscode-eslint", "esbenp.prettier-vscode", "ms-vscode-remote.remote-containers", "eamodio.gitlens" ] } ================================================ FILE: CHANGELOG.md ================================================ # Changelog ## 2.1.80 - Added `rate_limits` field to statusline scripts for displaying Claude.ai rate limit usage (5-hour and 7-day windows with `used_percentage` and `resets_at`) - Added `source: 'settings'` plugin marketplace source — declare plugin entries inline in settings.json - Added CLI tool usage detection to plugin tips, in addition to file pattern matching - Added `effort` frontmatter support for skills and slash commands to override the model effort level when invoked - Added `--channels` (research preview) — allow MCP servers to push messages into your session - Fixed `--resume` dropping parallel tool results — sessions with parallel tool calls now restore all tool_use/tool_result pairs instead of showing `[Tool result missing]` placeholders - Fixed voice mode WebSocket failures caused by Cloudflare bot detection on non-browser TLS fingerprints - Fixed 400 errors when using fine-grained tool streaming through API proxies, Bedrock, or Vertex - Fixed `/remote-control` appearing for gateway and third-party provider deployments where it cannot function - Fixed `/sandbox` tab switching not responding to Tab or arrow keys - Improved responsiveness of `@` file autocomplete in large git repositories - Improved `/effort` to show what auto currently resolves to, matching the status bar indicator - Improved `/permissions` — Tab and arrow keys now switch tabs from within a list - Improved background tasks panel — left arrow now closes from the list view - Simplified 
plugin install tips to use a single `/plugin install` command instead of a two-step flow - Reduced memory usage on startup in large repositories (~80 MB saved on 250k-file repos) - Fixed managed settings (`enabledPlugins`, `permissions.defaultMode`, policy-set env vars) not being applied at startup when `remote-settings.json` was cached from a prior session ## 2.1.79 - Added `--console` flag to `claude auth login` for Anthropic Console (API billing) authentication - Added "Show turn duration" toggle to the `/config` menu - Fixed `claude -p` hanging when spawned as a subprocess without explicit stdin (e.g. Python `subprocess.run`) - Fixed Ctrl+C not working in `-p` (print) mode - Fixed `/btw` returning the main agent's output instead of answering the side question when triggered during streaming - Fixed voice mode not activating correctly on startup when `voiceEnabled: true` is set - Fixed left/right arrow tab navigation in `/permissions` - Fixed `CLAUDE_CODE_DISABLE_TERMINAL_TITLE` not preventing terminal title from being set on startup - Fixed custom status line showing nothing when workspace trust is blocking it - Fixed enterprise users being unable to retry on rate limit (429) errors - Fixed `SessionEnd` hooks not firing when using interactive `/resume` to switch sessions - Improved startup memory usage by ~18MB across all scenarios - Improved non-streaming API fallback with a 2-minute per-attempt timeout, preventing sessions from hanging indefinitely - `CLAUDE_CODE_PLUGIN_SEED_DIR` now supports multiple seed directories separated by the platform path delimiter (`:` on Unix, `;` on Windows) - [VSCode] Added `/remote-control` — bridge your session to claude.ai/code to continue from a browser or phone - [VSCode] Session tabs now get AI-generated titles based on your first message - [VSCode] Fixed the thinking pill showing "Thinking" instead of "Thought for Ns" after a response completes - [VSCode] Fixed missing session diff button when opening sessions from the 
left sidebar ## 2.1.78 - Added `StopFailure` hook event that fires when the turn ends due to an API error (rate limit, auth failure, etc.) - Added `${CLAUDE_PLUGIN_DATA}` variable for plugin persistent state that survives plugin updates; `/plugin uninstall` prompts before deleting it - Added `effort`, `maxTurns`, and `disallowedTools` frontmatter support for plugin-shipped agents - Terminal notifications (iTerm2/Kitty/Ghostty popups, progress bar) now reach the outer terminal when running inside tmux with `set -g allow-passthrough on` - Response text now streams line-by-line as it's generated - Fixed `git log HEAD` failing with "ambiguous argument" inside sandboxed Bash on Linux, and stub files polluting `git status` in the working directory - Fixed `cc log` and `--resume` silently truncating conversation history on large sessions (>5 MB) that used subagents - Fixed infinite loop when API errors triggered stop hooks that re-fed blocking errors to the model - Fixed `deny: ["mcp__servername"]` permission rules not removing MCP server tools before sending to the model, allowing it to see and attempt blocked tools - Fixed `sandbox.filesystem.allowWrite` not working with absolute paths (previously required `//` prefix) - Fixed `/sandbox` Dependencies tab showing Linux prerequisites on macOS instead of macOS-specific info - **Security:** Fixed silent sandbox disable when `sandbox.enabled: true` is set but dependencies are missing — now shows a visible startup warning - Fixed `.git`, `.claude`, and other protected directories being writable without a prompt in `bypassPermissions` mode - Fixed ctrl+u in normal mode scrolling instead of readline kill-line (ctrl+u/ctrl+d half-page scroll moved to transcript mode only) - Fixed voice mode modifier-combo push-to-talk keybindings (e.g. 
ctrl+k) requiring a hold instead of activating immediately - Fixed voice mode not working on WSL2 with WSLg (Windows 11); WSL1/Win10 users now get a clear error - Fixed `--worktree` flag not loading skills and hooks from the worktree directory - Fixed `CLAUDE_CODE_DISABLE_GIT_INSTRUCTIONS` and `includeGitInstructions` setting not suppressing the git status section in the system prompt - Fixed Bash tool not finding Homebrew and other PATH-dependent binaries when VS Code is launched from Dock/Spotlight - Fixed washed-out Claude orange color in VS Code/Cursor/code-server terminals that don't advertise truecolor support - Added `ANTHROPIC_CUSTOM_MODEL_OPTION` env var to add a custom entry to the `/model` picker, with optional `_NAME` and `_DESCRIPTION` suffixed vars for display - Fixed `ANTHROPIC_BETAS` environment variable being silently ignored when using Haiku models - Fixed queued prompts being concatenated without a newline separator - Improved memory usage and startup time when resuming large sessions - [VSCode] Fixed a brief flash of the login screen when opening the sidebar while already authenticated - [VSCode] Fixed "API Error: Rate limit reached" when selecting Opus — model dropdown no longer offers 1M context variant to subscribers whose plan tier is unknown ## 2.1.77 - Increased default maximum output token limits for Claude Opus 4.6 to 64k tokens, and the upper bound for Opus 4.6 and Sonnet 4.6 models to 128k tokens - Added `allowRead` sandbox filesystem setting to re-allow read access within `denyRead` regions - `/copy` now accepts an optional index: `/copy N` copies the Nth-latest assistant response - Fixed "Always Allow" on compound bash commands (e.g. 
`cd src && npm test`) saving a single rule for the full string instead of per-subcommand, leading to dead rules and repeated permission prompts - Fixed auto-updater starting overlapping binary downloads when the slash-command overlay repeatedly opened and closed, accumulating tens of gigabytes of memory - Fixed `--resume` silently truncating recent conversation history due to a race between memory-extraction writes and the main transcript - Fixed PreToolUse hooks returning `"allow"` bypassing `deny` permission rules, including enterprise managed settings - Fixed Write tool silently converting line endings when overwriting CRLF files or creating files in CRLF directories - Fixed memory growth in long-running sessions from progress messages surviving compaction - Fixed cost and token usage not being tracked when the API falls back to non-streaming mode - Fixed `CLAUDE_CODE_DISABLE_EXPERIMENTAL_BETAS` not stripping beta tool-schema fields, causing proxy gateways to reject requests - Fixed Bash tool reporting errors for successful commands when the system temp directory path contains spaces - Fixed paste being lost when typing immediately after pasting - Fixed Ctrl+D in `/feedback` text input deleting forward instead of the second press exiting the session - Fixed API error when dragging a 0-byte image file into the prompt - Fixed Claude Desktop sessions incorrectly using the terminal CLI's configured API key instead of OAuth - Fixed `git-subdir` plugins at different subdirectories of the same monorepo commit colliding in the plugin cache - Fixed ordered list numbers not rendering in terminal UI - Fixed a race condition where stale-worktree cleanup could delete an agent worktree just resumed from a previous crash - Fixed input deadlock when opening `/mcp` or similar dialogs while the agent is running - Fixed Backspace and Delete keys not working in vim NORMAL mode - Fixed status line not updating when vim mode is toggled on or off - Fixed hyperlinks opening twice on 
Cmd+click in VS Code, Cursor, and other xterm.js-based terminals - Fixed background colors rendering as terminal-default inside tmux with default configuration - Fixed iTerm2 session crash when selecting text inside tmux over SSH - Fixed clipboard copy silently failing in tmux sessions; copy toast now indicates whether to paste with `⌘V` or tmux `prefix+]` - Fixed `←`/`→` accidentally switching tabs in settings, permissions, and sandbox dialogs while navigating lists - Fixed IDE integration not auto-connecting when Claude Code is launched inside tmux or screen - Fixed CJK characters visually bleeding into adjacent UI elements when clipped at the right edge - Fixed teammate panes not closing when the leader exits - Fixed iTerm2 auto mode not detecting iTerm2 for native split-pane teammates - Faster startup on macOS (~60ms) by reading keychain credentials in parallel with module loading - Faster `--resume` on fork-heavy and very large sessions — up to 45% faster loading and ~100-150MB less peak memory - Improved Esc to abort in-flight non-streaming API requests - Improved `claude plugin validate` to check skill, agent, and command frontmatter plus `hooks/hooks.json`, catching YAML parse errors and schema violations - Background bash tasks are now killed if output exceeds 5GB, preventing runaway processes from filling disk - Sessions are now auto-named from plan content when you accept a plan - Improved headless mode plugin installation to compose correctly with `CLAUDE_CODE_PLUGIN_SEED_DIR` - Show a notice when `apiKeyHelper` takes longer than 10s, preventing it from blocking the main loop - The Agent tool no longer accepts a `resume` parameter — use `SendMessage({to: agentId})` to continue a previously spawned agent - `SendMessage` now auto-resumes stopped agents in the background instead of returning an error - Renamed `/fork` to `/branch` (`/fork` still works as an alias) - [VSCode] Improved plan preview tab titles to use the plan's heading instead of "Claude's 
Plan" - [VSCode] When option+click doesn't trigger native selection on macOS, the footer now points to the `macOptionClickForcesSelection` setting ## 2.1.76 - Added MCP elicitation support — MCP servers can now request structured input mid-task via an interactive dialog (form fields or browser URL) - Added new `Elicitation` and `ElicitationResult` hooks to intercept and override responses before they're sent back - Added `-n` / `--name <name>` CLI flag to set a display name for the session at startup - Added `worktree.sparsePaths` setting for `claude --worktree` in large monorepos to check out only the directories you need via git sparse-checkout - Added `PostCompact` hook that fires after compaction completes - Added `/effort` slash command to set model effort level - Added session quality survey — enterprise admins can configure the sample rate via the `feedbackSurveyRate` setting - Fixed deferred tools (loaded via `ToolSearch`) losing their input schemas after conversation compaction, causing array and number parameters to be rejected with type errors - Fixed slash commands showing "Unknown skill" - Fixed plan mode asking for re-approval after the plan was already accepted - Fixed voice mode swallowing keypresses while a permission dialog or plan editor was open - Fixed `/voice` not working on Windows when installed via npm - Fixed spurious "Context limit reached" when invoking a skill with `model:` frontmatter on a 1M-context session - Fixed "adaptive thinking is not supported on this model" error when using non-standard model strings - Fixed `Bash(cmd:*)` permission rules not matching when a quoted argument contains `#` - Fixed "don't ask again" in the Bash permission dialog showing the full raw command for pipes and compound commands - Fixed auto-compaction retrying indefinitely after consecutive failures — a circuit breaker now stops after 3 attempts - Fixed MCP reconnect spinner persisting after successful reconnection - Fixed LSP plugins not registering servers 
when the LSP Manager initialized before marketplaces were reconciled - Fixed clipboard copying in tmux over SSH — now attempts both direct terminal write and tmux clipboard integration - Fixed `/export` showing only the filename instead of the full file path in the success message - Fixed transcript not auto-scrolling to new messages after selecting text - Fixed Escape key not working to exit the login method selection screen - Fixed several Remote Control issues: sessions silently dying when the server reaps an idle environment, rapid messages being queued one-at-a-time instead of batched, and stale work items causing redelivery after JWT refresh - Fixed bridge sessions failing to recover after extended WebSocket disconnects - Fixed slash commands not found when typing the exact name of a soft-hidden command - Improved `--worktree` startup performance by reading git refs directly and skipping redundant `git fetch` when the remote branch is already available locally - Improved background agent behavior — killing a background agent now preserves its partial results in the conversation context - Improved model fallback notifications — now always visible instead of hidden behind verbose mode, with human-friendly model names - Improved blockquote readability on dark terminal themes — text is now italic with a left bar instead of dim - Improved stale worktree cleanup — worktrees left behind after an interrupted parallel run are now automatically cleaned up - Improved Remote Control session titles — now derived from your first prompt instead of showing "Interactive session" - Improved `/voice` to show your dictation language on enable and warn when your `language` setting isn't supported for voice input - Updated `--plugin-dir` to only accept one path to support subcommands — use repeated `--plugin-dir` for multiple directories - [VSCode] Fixed gitignore patterns containing commas silently excluding entire filetypes from the @-mention file picker ## 2.1.75 - Added 1M 
context window for Opus 4.6 by default for Max, Team, and Enterprise plans (previously required extra usage) - Added `/color` command for all users to set a prompt-bar color for your session - Added session name display on the prompt bar when using `/rename` - Added last-modified timestamps to memory files, helping Claude reason about which memories are fresh vs. stale - Added hook source display (settings/plugin/skill) in permission prompts when a hook requires confirmation - Fixed voice mode not activating correctly on fresh installs without toggling `/voice` twice - Fixed the Claude Code header not updating the displayed model name after switching models with `/model` or Option+P - Fixed session crash when an attachment message computation returns undefined values - Fixed Bash tool mangling `!` in piped commands (e.g., `jq 'select(.x != .y)'` now works correctly) - Fixed managed-disabled plugins showing up in the `/plugin` Installed tab — plugins force-disabled by your organization are now hidden - Fixed token estimation over-counting for thinking and `tool_use` blocks, preventing premature context compaction - Fixed corrupted marketplace config path handling - Fixed `/resume` losing session names after resuming a forked or continued session - Fixed Esc not closing the `/status` dialog after visiting the Config tab - Fixed input handling when accepting or rejecting a plan - Fixed footer hint in agent teams showing "↓ to expand" instead of the correct "shift + ↓ to expand" - Improved startup performance on macOS non-MDM machines by skipping unnecessary subprocess spawns - Suppressed async hook completion messages by default (visible with `--verbose` or transcript mode) - Breaking change: Removed deprecated Windows managed settings fallback at `C:\ProgramData\ClaudeCode\managed-settings.json` — use `C:\Program Files\ClaudeCode\managed-settings.json` ## 2.1.74 - Added actionable suggestions to `/context` command — identifies context-heavy tools, memory bloat, and 
capacity warnings with specific optimization tips - Added `autoMemoryDirectory` setting to configure a custom directory for auto-memory storage - Fixed memory leak where streaming API response buffers were not released when the generator was terminated early, causing unbounded RSS growth on the Node.js/npm code path - Fixed managed policy `ask` rules being bypassed by user `allow` rules or skill `allowed-tools` - Fixed full model IDs (e.g., `claude-opus-4-5`) being silently ignored in agent frontmatter `model:` field and `--agents` JSON config — agents now accept the same model values as `--model` - Fixed MCP OAuth authentication hanging when the callback port is already in use - Fixed MCP OAuth refresh never prompting for re-auth after the refresh token expires, for OAuth servers that return errors with HTTP 200 (e.g. Slack) - Fixed voice mode silently failing on the macOS native binary for users whose terminal had never been granted microphone permission — the binary now includes the `audio-input` entitlement so macOS prompts correctly - Fixed `SessionEnd` hooks being killed after 1.5 s on exit regardless of `hook.timeout` — now configurable via `CLAUDE_CODE_SESSIONEND_HOOKS_TIMEOUT_MS` - Fixed `/plugin install` failing inside the REPL for marketplace plugins with local sources - Fixed marketplace update not syncing git submodules — plugin sources in submodules no longer break after update - Fixed unknown slash commands with arguments silently dropping input — now shows your input as a warning - Fixed Hebrew, Arabic, and other RTL text not rendering correctly in Windows Terminal, conhost, and VS Code integrated terminal - Fixed LSP servers not working on Windows due to malformed file URIs - Changed `--plugin-dir` so local dev copies now override installed marketplace plugins with the same name (unless that plugin is force-enabled by managed settings) - [VSCode] Fixed delete button not working for Untitled sessions - [VSCode] Improved scroll wheel responsiveness 
in the integrated terminal with terminal-aware acceleration ## 2.1.73 - Added `modelOverrides` setting to map model picker entries to custom provider model IDs (e.g. Bedrock inference profile ARNs) - Added actionable guidance when OAuth login or connectivity checks fail due to SSL certificate errors (corporate proxies, `NODE_EXTRA_CA_CERTS`) - Fixed freezes and 100% CPU loops triggered by permission prompts for complex bash commands - Fixed a deadlock that could freeze Claude Code when many skill files changed at once (e.g. during `git pull` in a repo with a large `.claude/skills/` directory) - Fixed Bash tool output being lost when running multiple Claude Code sessions in the same project directory - Fixed subagents with `model: opus`/`sonnet`/`haiku` being silently downgraded to older model versions on Bedrock, Vertex, and Microsoft Foundry - Fixed background bash processes spawned by subagents not being cleaned up when the agent exits - Fixed `/resume` showing the current session in the picker - Fixed `/ide` crashing with `onInstall is not defined` when auto-installing the extension - Fixed `/loop` not being available on Bedrock/Vertex/Foundry and when telemetry was disabled - Fixed SessionStart hooks firing twice when resuming a session via `--resume` or `--continue` - Fixed JSON-output hooks injecting no-op system-reminder messages into the model's context on every turn - Fixed voice mode session corruption when a slow connection overlaps a new recording - Fixed Linux sandbox failing to start with "ripgrep (rg) not found" on native builds - Fixed Linux native modules not loading on Amazon Linux 2 and other glibc 2.26 systems - Fixed "media_type: Field required" API error when receiving images via Remote Control - Fixed `/heapdump` failing on Windows with `EEXIST` error when the Desktop folder already exists - Improved Up arrow after interrupting Claude — now restores the interrupted prompt and rewinds the conversation in one step - Improved IDE detection speed 
at startup - Improved clipboard image pasting performance on macOS - Improved `/effort` to work while Claude is responding, matching `/model` behavior - Improved voice mode to automatically retry transient connection failures during rapid push-to-talk re-press - Improved the Remote Control spawn mode selection prompt with better context - Changed default Opus model on Bedrock, Vertex, and Microsoft Foundry to Opus 4.6 (was Opus 4.1) - Deprecated `/output-style` command — use `/config` instead. Output style is now fixed at session start for better prompt caching - VSCode: Fixed HTTP 400 errors for users behind proxies or on Bedrock/Vertex with Claude 4.5 models ## 2.1.72 - Fixed tool search to activate even with `ANTHROPIC_BASE_URL` as long as `ENABLE_TOOL_SEARCH` is set. - Added `w` key in `/copy` to write the focused selection directly to a file, bypassing the clipboard (useful over SSH) - Added optional description argument to `/plan` (e.g., `/plan fix the auth bug`) that enters plan mode and immediately starts - Added `ExitWorktree` tool to leave an `EnterWorktree` session - Added `CLAUDE_CODE_DISABLE_CRON` environment variable to immediately stop scheduled cron jobs mid-session - Added `lsof`, `pgrep`, `tput`, `ss`, `fd`, and `fdfind` to the bash auto-approval allowlist, reducing permission prompts for common read-only operations - Restored the `model` parameter on the Agent tool for per-invocation model overrides - Simplified effort levels to low/medium/high (removed max) with new symbols (○ ◐ ●) and a brief notification instead of a persistent icon. 
Use `/effort auto` to reset to default - Improved `/config` — Escape now cancels changes, Enter saves and closes, Space toggles settings - Improved up-arrow history to show current session's messages first when running multiple concurrent sessions - Improved voice input transcription accuracy for repo names and common dev terms (regex, OAuth, JSON) - Improved bash command parsing by switching to a native module — faster initialization and no memory leak - Reduced bundle size by ~510 KB - Changed CLAUDE.md HTML comments (`<!-- ... -->`) to be hidden from Claude when auto-injected. Comments remain visible when read with the Read tool - Fixed slow exits when background tasks or hooks were slow to respond - Fixed agent task progress stuck on "Initializing…" - Fixed skill hooks firing twice per event when a hooks-enabled skill is invoked by the model - Fixed several voice mode issues: occasional input lag, false "No speech detected" errors after releasing push-to-talk, and stale transcripts re-filling the prompt after submission - Fixed `--continue` not resuming from the most recent point after `--compact` - Fixed bash security parsing edge cases - Added support for marketplace git URLs without `.git` suffix (Azure DevOps, AWS CodeCommit) - Improved marketplace clone failure messages to show diagnostic info even when git produces no stderr - Fixed several plugin issues: installation failing on Windows with `EEXIST` error in OneDrive folders, marketplace blocking user-scope installs when a project-scope install exists, `CLAUDE_CODE_PLUGIN_CACHE_DIR` creating literal `~` directories, and `plugin.json` with marketplace-only fields failing to load - Fixed feedback survey appearing too frequently in long sessions - Fixed `--effort` CLI flag being reset by unrelated settings writes on startup - Fixed backgrounded Ctrl+B queries losing their transcript or corrupting the new conversation after `/clear` - Fixed `/clear` killing background agent/bash tasks — only foreground tasks are now 
cleared - Fixed worktree isolation issues: Task tool resume not restoring cwd, and background task notifications missing `worktreePath` and `worktreeBranch` - Fixed `/model` not displaying results when run while Claude is working - Fixed digit keys selecting menu options instead of typing in plan mode permission prompt's text input - Fixed sandbox permission issues: certain file write operations incorrectly allowed without prompting, and output redirections to allowlisted directories (like `/tmp/claude/`) prompting unnecessarily - Improved CPU utilization in long sessions - Fixed prompt cache invalidation in SDK `query()` calls, reducing input token costs up to 12x - Fixed Escape key becoming unresponsive after cancelling a query - Fixed double Ctrl+C not exiting when background agents or tasks are running - Fixed team agents to inherit the leader's model - Fixed "Always Allow" saving permission rules that never match again - Fixed several hooks issues: `transcript_path` pointing to the wrong directory for resumed/forked sessions, agent `prompt` being silently deleted from settings.json on every settings write, PostToolUse block reason displaying twice, async hooks not receiving stdin with bash `read -r`, and validation error message showing an example that fails validation - Fixed session crashes in Desktop/SDK when Read returned files containing U+2028/U+2029 characters - Fixed terminal title being cleared on exit even when `CLAUDE_CODE_DISABLE_TERMINAL_TITLE` was set - Fixed several permission rule matching issues: wildcard rules not matching commands with heredocs, embedded newlines, or no arguments; `sandbox.excludedCommands` failing with env var prefixes; "always allow" suggesting overly broad prefixes for nested CLI tools; and deny rules not applying to all command forms - Fixed oversized and truncated images from Bash data-URL output - Fixed a crash when resuming sessions that contained Bedrock API errors - Fixed intermittent "expected boolean, received 
string" validation errors on Edit, Bash, and Grep tool inputs - Fixed multi-line session titles when forking from a conversation whose first message contained newlines - Fixed queued messages not showing attached images, and images being lost when pressing ↑ to edit a queued message - Fixed parallel tool calls where a failed Read/WebFetch/Glob would cancel its siblings — only Bash errors now cascade - VSCode: Fixed scroll speed in integrated terminals not matching native terminals - VSCode: Fixed Shift+Enter submitting input instead of inserting a newline for users with older keybindings - VSCode: Added effort level indicator on the input border - VSCode: Added `vscode://anthropic.claude-code/open` URI handler to open a new Claude Code tab programmatically, with optional `prompt` and `session` query parameters ## 2.1.71 - Added `/loop` command to run a prompt or slash command on a recurring interval (e.g. `/loop 5m check the deploy`) - Added cron scheduling tools for recurring prompts within a session - Added `voice:pushToTalk` keybinding to make the voice activation key rebindable in `keybindings.json` (default: space) — modifier+letter combos like `meta+k` have zero typing interference - Added `fmt`, `comm`, `cmp`, `numfmt`, `expr`, `test`, `printf`, `getconf`, `seq`, `tsort`, and `pr` to the bash auto-approval allowlist - Fixed stdin freeze in long-running sessions where keystrokes stop being processed but the process stays alive - Fixed a 5–8 second startup freeze for users with voice mode enabled, caused by CoreAudio initialization blocking the main thread after system wake - Fixed startup UI freeze when many claude.ai proxy connectors refresh an expired OAuth token simultaneously - Fixed forked conversations (`/fork`) sharing the same plan file, which caused plan edits in one fork to overwrite the other - Fixed the Read tool putting oversized images into context when image processing failed, breaking subsequent turns in long image-heavy sessions - Fixed 
false-positive permission prompts for compound bash commands containing heredoc commit messages - Fixed plugin installations being lost when running multiple Claude Code instances - Fixed claude.ai connectors failing to reconnect after OAuth token refresh - Fixed claude.ai MCP connector startup notifications appearing for every org-configured connector instead of only previously connected ones - Fixed background agent completion notifications missing the output file path, which made it difficult for parent agents to recover agent results after context compaction - Fixed duplicate output in Bash tool error messages when commands exit with non-zero status - Fixed Chrome extension auto-detection getting permanently stuck on "not installed" after running on a machine without local Chrome - Fixed `/plugin marketplace update` failing with merge conflicts when the marketplace is pinned to a branch/tag ref - Fixed `/plugin marketplace add owner/repo@ref` incorrectly parsing `@` — previously only `#` worked as a ref separator, causing undiagnosable errors with `strictKnownMarketplaces` - Fixed duplicate entries in `/permissions` Workspace tab when the same directory is added with and without a trailing slash - Fixed `--print` hanging forever when team agents are configured — the exit loop no longer waits on long-lived `in_process_teammate` tasks - Fixed "❯ Tool loaded." 
appearing in the REPL after every `ToolSearch` call - Fixed prompting for `cd && git ...` on Windows when the model uses a mingw-style path - Improved startup time by deferring native image processor loading to first use - Improved bridge session reconnection to complete within seconds after laptop wake from sleep, instead of waiting up to 10 minutes - Improved `/plugin uninstall` to disable project-scoped plugins in `.claude/settings.local.json` instead of modifying `.claude/settings.json`, so changes don't affect teammates - Improved plugin-provided MCP server deduplication — servers that duplicate a manually-configured server (same command/URL) are now skipped, preventing duplicate connections and tool sets. Suppressions are shown in the `/plugin` menu. - Updated `/debug` to toggle debug logging on mid-session, since debug logs are no longer written by default - Removed startup notification noise for unauthenticated org-registered claude.ai connectors ## 2.1.70 - Fixed API 400 errors when using `ANTHROPIC_BASE_URL` with a third-party gateway — tool search now correctly detects proxy endpoints and disables `tool_reference` blocks - Fixed `API Error: 400 This model does not support the effort parameter` when using custom Bedrock inference profiles or other model identifiers not matching standard Claude naming patterns - Fixed empty model responses immediately after `ToolSearch` — the server renders tool schemas with system-prompt-style tags at the prompt tail, which could confuse models into stopping early - Fixed prompt-cache bust when an MCP server with `instructions` connects after the first turn - Fixed Enter inserting a newline instead of submitting when typing over a slow SSH connection - Fixed clipboard corrupting non-ASCII text (CJK, emoji) on Windows/WSL by using PowerShell `Set-Clipboard` - Fixed extra VS Code windows opening at startup on Windows when running from the VS Code integrated terminal - Fixed voice mode failing on Windows native binary with 
"native audio module could not be loaded" - Fixed push-to-talk not activating on session start when `voiceEnabled: true` was set in settings - Fixed markdown links containing `#NNN` references incorrectly pointing to the current repository instead of the linked URL - Fixed repeated "Model updated to Opus 4.6" notification when a project's `.claude/settings.json` has a legacy Opus model string pinned - Fixed plugins showing as inaccurately installed in `/plugin` - Fixed plugins showing "not found in marketplace" errors on fresh startup by auto-refreshing after marketplace installation - Fixed `/security-review` command failing with `unknown option merge-base` on older git versions - Fixed `/color` command having no way to reset back to the default color — `/color default`, `/color gray`, `/color reset`, and `/color none` now restore the default - Fixed a performance regression in the `AskUserQuestion` preview dialog that re-ran markdown rendering on every keystroke in the notes input - Fixed feature flags read during early startup never refreshing their disk cache, causing stale values to persist across sessions - Fixed `permissions.defaultMode` settings values other than `acceptEdits` or `plan` being applied in Claude Code Remote environments — they are now ignored - Fixed skill listing being re-injected on every `--resume` (~600 tokens saved per resume) - Fixed teleport marker not rendering in VS Code teleported sessions - Improved error message when microphone captures silence to distinguish from "no speech detected" - Improved compaction to preserve images in the summarizer request, allowing prompt cache reuse for faster and cheaper compaction - Improved `/rename` to work while Claude is processing, instead of being silently queued - Reduced prompt input re-renders during turns by ~74% - Reduced startup memory by ~426KB for users without custom CA certificates - Reduced Remote Control `/poll` rate to once per 10 minutes while connected (was 1–2s), cutting server 
load ~300×. Reconnection is unaffected — transport loss immediately wakes fast polling. - [VSCode] Added spark icon in VS Code activity bar that lists all Claude Code sessions, with sessions opening as full editors - [VSCode] Added full markdown document view for plans in VS Code, with support for adding comments to provide feedback - [VSCode] Added native MCP server management dialog — use `/mcp` in the chat panel to enable/disable servers, reconnect, and manage OAuth authentication without switching to the terminal ## 2.1.69 - Added the `/claude-api` skill for building applications with the Claude API and Anthropic SDK - Added Ctrl+U on an empty bash prompt (`!`) to exit bash mode, matching `escape` and `backspace` - Added numeric keypad support for selecting options in Claude's interview questions (previously only the number row above QWERTY worked) - Added optional name argument to `/remote-control` and `claude remote-control` (`/remote-control My Project` or `--name "My Project"`) to set a custom session title visible in claude.ai/code - Added Voice STT support for 10 new languages (20 total) — Russian, Polish, Turkish, Dutch, Ukrainian, Greek, Czech, Danish, Swedish, Norwegian - Added effort level display (e.g., "with low effort") to the logo and spinner, making it easier to see which effort setting is active - Added agent name display in terminal title when using `claude --agent` - Added `sandbox.enableWeakerNetworkIsolation` setting (macOS only) to allow Go programs like `gh`, `gcloud`, and `terraform` to verify TLS certificates when using a custom MITM proxy with `httpProxyPort` - Added `includeGitInstructions` setting (and `CLAUDE_CODE_DISABLE_GIT_INSTRUCTIONS` env var) to remove built-in commit and PR workflow instructions from Claude's system prompt - Added `/reload-plugins` command to activate pending plugin changes without restarting - Added a one-time startup prompt suggesting Claude Code Desktop on macOS and Windows (max 3 showings, dismissible) - 
Added `${CLAUDE_SKILL_DIR}` variable for skills to reference their own directory in SKILL.md content - Added `InstructionsLoaded` hook event that fires when CLAUDE.md or `.claude/rules/*.md` files are loaded into context - Added `agent_id` (for subagents) and `agent_type` (for subagents and `--agent`) to hook events - Added `worktree` field to status line hook commands with name, path, branch, and original repo directory when running in a `--worktree` session - Added `pluginTrustMessage` in managed settings to append organization-specific context to the plugin trust warning shown before installation - Added policy limit fetching (e.g., remote control restrictions) for Team plan OAuth users, not just Enterprise - Added `pathPattern` to `strictKnownMarketplaces` for regex-matching file/directory marketplace sources alongside `hostPattern` restrictions - Added plugin source type `git-subdir` to point to a subdirectory within a git repo - Added `oauth.authServerMetadataUrl` config option for MCP servers to specify a custom OAuth metadata discovery URL when standard discovery fails - Fixed a security issue where nested skill discovery could load skills from gitignored directories like `node_modules` - Fixed trust dialog silently enabling all `.mcp.json` servers on first run. You'll now see the per-server approval dialog as expected - Fixed `claude remote-control` crashing immediately on npm installs with "bad option: --sdk-url" (anthropics/claude-code#28334) - Fixed `--model claude-opus-4-0` and `--model claude-opus-4-1` resolving to deprecated Opus versions instead of current - Fixed macOS keychain corruption when using multiple OAuth MCP servers. Large OAuth metadata blobs could overflow the `security -i` stdin buffer, silently leaving stale credentials behind and causing repeated `/login` prompts. 
- Fixed `.credentials.json` losing `subscriptionType` (showing "Claude API" instead of "Claude Pro"/"Claude Max") when the profile endpoint transiently fails during token refresh (anthropics/claude-code#30185) - Fixed ghost dotfiles (`.bashrc`, `HEAD`, etc.) appearing as untracked files in the working directory after sandboxed Bash commands on Linux - Fixed Shift+Enter printing `[27;2;13~` instead of inserting a newline in Ghostty over SSH - Fixed stash (Ctrl+S) being cleared when submitting a message while Claude is working - Fixed ctrl+o (transcript toggle) freezing for many seconds in long sessions with lots of file edits - Fixed plan mode feedback input not supporting multi-line text entry (backslash+Enter and Shift+Enter now insert newlines) - Fixed cursor not moving down into blank lines at the top of the input box - Fixed `/stats` crash when transcript files contain entries with missing or malformed timestamps - Fixed a brief hang after a streaming error on long sessions (the transcript was being fully rewritten to drop one line; it is now truncated in place) - Fixed `--setting-sources user` not blocking dynamically discovered project skills - Fixed duplicate CLAUDE.md, slash commands, agents, and rules when running from a worktree nested inside its main repo (e.g. 
`claude -w`) - Fixed plugin Stop/SessionEnd/etc hooks not firing after any `/plugin` operation - Fixed plugin hooks being silently dropped when two plugins use the same `${CLAUDE_PLUGIN_ROOT}/...` command template - Fixed memory leak in long-running SDK/CCR sessions where conversation messages were retained unnecessarily - Fixed API 400 errors in forked agents (autocompact, summarization) when resuming sessions that were interrupted mid-tool-batch - Fixed "unexpected tool_use_id found in tool_result blocks" error when resuming conversations that start with an orphaned tool result - Fixed teammates accidentally spawning nested teammates via the Agent tool's `name` parameter - Fixed `CLAUDE_CODE_MAX_OUTPUT_TOKENS` being ignored during conversation compaction - Fixed `/compact` summary rendering as a user bubble in SDK consumers (Claude Code Remote web UI, VSCode extension) - Fixed voice space bar getting stuck after a failed voice activation (module loading race, cold GrowthBook) - Fixed worktree file copy on Windows - Fixed global `.claude` folder detection on Windows - Fixed symlink bypass where writing new files through a symlinked parent directory could escape the working directory in `acceptEdits` mode - Fixed sandbox prompting users to approve non-allowed domains when `allowManagedDomainsOnly` is enabled in managed settings — non-allowed domains are now blocked automatically with no bypass - Fixed interactive tools (e.g., `AskUserQuestion`) being silently auto-allowed when listed in a skill's allowed-tools, bypassing the permission prompt and running with empty answers - Fixed multi-GB memory spike when committing with large untracked binary files in the working tree - Fixed Escape not interrupting a running turn when the input box has draft text. Use Up arrow to pull queued messages back for editing, or Ctrl+U to clear the input line. 
- Fixed Android app crash when running local slash commands (`/voice`, `/cost`) in Remote Control sessions - Fixed a memory leak where old message array versions accumulated in React Compiler `memoCache` over long sessions - Fixed a memory leak where REPL render scopes accumulated over long sessions (~35MB over 1000 turns) - Fixed memory retention in in-process teammates where the parent's full conversation history was pinned for the teammate's lifetime, preventing GC after `/clear` or auto-compact - Fixed a memory leak in interactive mode where hook events could accumulate unboundedly during long sessions - Fixed hang when `--mcp-config` points to a corrupted file - Fixed slow startup when many skills/plugins are installed - Fixed `cd && ` permission prompt to surface the chained command instead of only showing "Yes, allow reading from /" - Fixed conditional `.claude/rules/*.md` files (with `paths:` frontmatter) and nested CLAUDE.md files not loading in print mode (`claude -p`) - Fixed `/clear` not fully clearing all session caches, reducing memory retention in long sessions - Fixed terminal flicker caused by animated elements at the scrollback boundary - Fixed UI frame drops on macOS when using MCP servers with OAuth (regression from 2.1.x) - Fixed occasional frame stalls during typing caused by synchronous debug log flushes - Fixed `TeammateIdle` and `TaskCompleted` hooks to support `{"continue": false, "stopReason": "..."}` to stop the teammate, matching `Stop` hook behavior - Fixed `WorktreeCreate` and `WorktreeRemove` plugin hooks being silently ignored - Fixed skill descriptions with colons (e.g., "Triggers include: X, Y, Z") failing to load from SKILL.md frontmatter - Fixed project skills without a `description:` frontmatter field not appearing in Claude's available skills list - Fixed `/context` showing identical token counts for all MCP tools from a server - Fixed literal `nul` file creation on Windows when the model uses CMD-style `2>nul` redirection in 
Git Bash - Fixed extra blank lines appearing below each tool call in the expanded subagent transcript view (Ctrl+O) - Fixed Tab/arrow keys not cycling Settings tabs when `/config` search box is focused but empty - Fixed service key OAuth sessions (CCR containers) spamming `[ERROR]` logs with 403s from profile-scoped endpoints - Fixed inconsistent color for "Remote Control active" status indicator - Fixed Voice waveform cursor covering the first suffix letter when dictating mid-input - Fixed Voice input showing all 5 spaces during warmup instead of capping at ~2 (aligning with the "keep holding…" hint) - Improved spinner performance by isolating the 50ms animation loop from the surrounding shell, reducing render and CPU overhead during turns - Improved UI rendering performance in native binaries with React Compiler - Improved `--worktree` startup by eliminating a git subprocess on the startup path - Improved macOS startup by eliminating redundant settings-file reloads when managed settings resolve - Improved macOS startup for Claude.ai enterprise/team users by skipping an unnecessary keychain lookup - Improved MCP `-p` startup by pipelining claude.ai config fetch with local connections and using a concurrency pool instead of sequential batching - Improved voice startup by removing imperceptible warmup pulse animations that were causing re-render stutter - Improved MCP binary content handling: tools returning PDFs, Office documents, or audio now save decoded bytes to disk with the correct file extension instead of dumping raw base64 into the conversation context. WebFetch also saves binary responses alongside its summary. 
- Improved memory usage in long sessions by stabilizing `onSubmit` across message updates - Improved LSP tool rendering and memory context building to no longer read entire files - Improved session upload and memory sync to avoid reading large files into memory before size/binary checks - Improved file operation performance by avoiding reading file contents for existence checks (6 sites) - Improved documentation to clarify that `--append-system-prompt-file` and `--system-prompt-file` work in interactive mode (the docs previously said print mode only) - Reduced baseline memory by ~16MB by deferring Yoga WASM preloading - Reduced memory footprint for SDK and CCR sessions using stream-json output - Reduced memory usage when resuming large sessions (including compacted history) - Reduced token usage on multi-agent tasks with more concise subagent final reports - Changed Sonnet 4.5 users on Pro/Max/Team Premium to be automatically migrated to Sonnet 4.6 - Changed the `/resume` picker to show your most recent prompt instead of the first one. This also resolves some titles appearing as `(session)`. 
- Changed claude.ai MCP connector failures to show a notification instead of silently disappearing from the tool list - Changed example command suggestions to be generated deterministically instead of calling Haiku - Changed resuming after compaction to no longer produce a preamble recap before continuing - [SDK] Changed task creation to no longer require the `activeForm` field — the spinner falls back to the task subject - [VSCode] Added compaction display as a collapsible "Compacted chat" card with the summary inside - [VSCode] The permission mode picker now respects `permissions.disableBypassPermissionsMode` from your effective Claude Code settings (including managed/policy settings) — when set to `disable`, bypass permissions mode is hidden from the picker - [VSCode] Fixed RTL text (Arabic, Hebrew, Persian) rendering reversed in the chat panel (regression in v2.1.63) ## 2.1.68 - Opus 4.6 now defaults to medium effort for Max and Team subscribers. Medium effort works well for most tasks — it's the sweet spot between speed and thoroughness. 
You can change this anytime with `/model` - Re-introduced the "ultrathink" keyword to enable high effort for the next turn - Removed Opus 4 and 4.1 from Claude Code on the first-party API — users with these models pinned are automatically moved to Opus 4.6 ## 2.1.66 - Reduced spurious error logging ## 2.1.63 - Added `/simplify` and `/batch` bundled slash commands - Fixed local slash command output like /cost appearing as user-sent messages instead of system messages in the UI - Project configs & auto memory now shared across git worktrees of the same repository - Added `ENABLE_CLAUDEAI_MCP_SERVERS=false` env var to opt out from making claude.ai MCP servers available - Improved `/model` command to show the currently active model in the slash command menu - Added HTTP hooks, which can POST JSON to a URL and receive JSON instead of running a shell command - Fixed listener leak in bridge polling loop - Fixed listener leak in MCP OAuth flow cleanup - Added manual URL paste fallback during MCP OAuth authentication. If the automatic localhost redirect doesn't work, you can paste the callback URL to complete authentication. 
- Fixed memory leak when navigating hooks configuration menu - Fixed listener leak in interactive permission handler during auto-approvals - Fixed file count cache ignoring glob ignore patterns - Fixed memory leak in bash command prefix cache - Fixed MCP tool/resource cache leak on server reconnect - Fixed IDE host IP detection cache incorrectly sharing results across ports - Fixed WebSocket listener leak on transport reconnect - Fixed memory leak in git root detection cache that could cause unbounded growth in long-running sessions - Fixed memory leak in JSON parsing cache that grew unbounded over long sessions - VSCode: Fixed remote sessions not appearing in conversation history - Fixed a race condition in the REPL bridge where new messages could arrive at the server interleaved with historical messages during the initial connection flush, causing message ordering issues. - Fixed memory leak where long-running teammates retained all messages in AppState even after conversation compaction - Fixed a memory leak where MCP server fetch caches were not cleared on disconnect, causing growing memory usage with servers that reconnect frequently - Improved memory usage in long sessions with subagents by stripping heavy progress message payloads during context compaction - Added "Always copy full response" option to the `/copy` picker. When selected, future `/copy` commands will skip the code block picker and copy the full response directly. - VSCode: Added session rename and remove actions to the sessions list - Fixed `/clear` not resetting cached skills, which could cause stale skill content to persist in the new conversation ## 2.1.62 - Fixed prompt suggestion cache regression that reduced cache hit rates ## 2.1.61 - Fixed concurrent writes corrupting config file on Windows ## 2.1.59 - Claude automatically saves useful context to auto-memory. 
Manage with /memory - Added `/copy` command to show an interactive picker when code blocks are present, allowing selection of individual code blocks or the full response. - Improved "always allow" prefix suggestions for compound bash commands (e.g. `cd /tmp && git fetch && git push`) to compute smarter per-subcommand prefixes instead of treating the whole command as one - Improved ordering of short task lists - Improved memory usage in multi-agent sessions by releasing completed subagent task state - Fixed MCP OAuth token refresh race condition when running multiple Claude Code instances simultaneously - Fixed shell commands not showing a clear error message when the working directory has been deleted - Fixed config file corruption that could wipe authentication when multiple Claude Code instances ran simultaneously ## 2.1.58 - Expanded Remote Control to more users ## 2.1.56 - VS Code: Fixed another cause of "command 'claude-vscode.editor.openLast' not found" crashes ## 2.1.55 - Fixed BashTool failing on Windows with EINVAL error ## 2.1.53 - Fixed a UI flicker where user input would briefly disappear after submission before the message rendered - Fixed bulk agent kill (ctrl+f) to send a single aggregate notification instead of one per agent, and to properly clear the command queue - Fixed graceful shutdown sometimes leaving stale sessions when using Remote Control by parallelizing teardown network calls - Fixed `--worktree` sometimes being ignored on first launch - Fixed a panic ("switch on corrupted value") on Windows - Fixed a crash that could occur when spawning many processes on Windows - Fixed a crash in the WebAssembly interpreter on Linux x64 & Windows x64 - Fixed a crash that sometimes occurred after 2 minutes on Windows ARM64 ## 2.1.52 - VS Code: Fixed extension crash on Windows ("command 'claude-vscode.editor.openLast' not found") ## 2.1.51 - Added `claude remote-control` subcommand for external builds, enabling local environment serving for all users. 
- Updated plugin marketplace default git timeout from 30s to 120s and added `CLAUDE_CODE_PLUGIN_GIT_TIMEOUT_MS` to configure. - Added support for custom npm registries and specific version pinning when installing plugins from npm sources - BashTool now skips login shell (`-l` flag) by default when a shell snapshot is available, improving command execution performance. Previously this required setting `CLAUDE_BASH_NO_LOGIN=true`. - Fixed a security issue where `statusLine` and `fileSuggestion` hook commands could execute without workspace trust acceptance in interactive mode. - Tool results larger than 50K characters are now persisted to disk (previously 100K). This reduces context window usage and improves conversation longevity. - Fixed a bug where duplicate `control_response` messages (e.g. from WebSocket reconnects) could cause API 400 errors by pushing duplicate assistant messages into the conversation. - Added `CLAUDE_CODE_ACCOUNT_UUID`, `CLAUDE_CODE_USER_EMAIL`, and `CLAUDE_CODE_ORGANIZATION_UUID` environment variables for SDK callers to provide account info synchronously, eliminating a race condition where early telemetry events lacked account metadata. - Fixed slash command autocomplete crashing when a plugin's SKILL.md description is a YAML array or other non-string type - The `/model` picker now shows human-readable labels (e.g., "Sonnet 4.5") instead of raw model IDs for pinned model versions, with an upgrade hint when a newer version is available. - Managed settings can now be set via macOS plist or Windows Registry. Learn more at https://code.claude.com/docs/en/settings#settings-files ## 2.1.50 - Added support for `startupTimeout` configuration for LSP servers - Added `WorktreeCreate` and `WorktreeRemove` hook events, enabling custom VCS setup and teardown when agent worktree isolation creates or removes worktrees. 
- Fixed a bug where resumed sessions could be invisible when the working directory involved symlinks, because the session storage path was resolved at different times during startup. Also fixed session data loss on SSH disconnect by flushing session data before hooks and analytics in the graceful shutdown sequence. - Linux: Fixed native modules not loading on systems with glibc older than 2.30 (e.g., RHEL 8) - Fixed memory leak in agent teams where completed teammate tasks were never garbage collected from session state - Fixed `CLAUDE_CODE_SIMPLE` to fully strip down skills, session memory, custom agents, and CLAUDE.md token counting - Fixed `/mcp reconnect` freezing the CLI when given a server name that doesn't exist - Fixed memory leak where completed task state objects were never removed from AppState - Added support for `isolation: worktree` in agent definitions, allowing agents to declaratively run in isolated git worktrees. - `CLAUDE_CODE_SIMPLE` mode now also disables MCP tools, attachments, hooks, and CLAUDE.md file loading for a fully minimal experience. 
- Fixed bug where MCP tools were not discovered when tool search is enabled and a prompt is passed in as a launch argument - Improved memory usage during long sessions by clearing internal caches after compaction - Added `claude agents` CLI command to list all configured agents - Improved memory usage during long sessions by clearing large tool results after they have been processed - Fixed a memory leak where LSP diagnostic data was never cleaned up after delivery, causing unbounded memory growth in long sessions - Fixed a memory leak where completed task output was not freed from memory, reducing memory usage in long sessions with many tasks - Improved startup performance for headless mode (`-p` flag) by deferring Yoga WASM and UI component imports - Fixed prompt suggestion cache regression that reduced cache hit rates - Fixed unbounded memory growth in long sessions by capping file history snapshots - Added `CLAUDE_CODE_DISABLE_1M_CONTEXT` environment variable to disable 1M context window support - Opus 4.6 (fast mode) now includes the full 1M context window - VSCode: Added `/extra-usage` command support in VS Code sessions - Fixed memory leak where TaskOutput retained recent lines after cleanup - Fixed memory leak in CircularBuffer where cleared items were retained in the backing array - Fixed memory leak in shell command execution where ChildProcess and AbortController references were retained after cleanup ## 2.1.49 - Improved MCP OAuth authentication with step-up auth support and discovery caching, reducing redundant network requests during server connections - Added `--worktree` (`-w`) flag to start Claude in an isolated git worktree - Subagents support `isolation: "worktree"` for working in a temporary git worktree - Added Ctrl+F keybinding to kill background agents (two-press confirmation) - Agent definitions support `background: true` to always run as a background task - Plugins can ship `settings.json` for default configuration - Fixed file-not-found 
errors to suggest corrected paths when the model drops the repo folder - Fixed Ctrl+C and ESC being silently ignored when background agents are running and the main thread is idle. Pressing twice within 3 seconds now kills all background agents. - Fixed prompt suggestion cache regression that reduced cache hit rates. - Fixed `plugin enable` and `plugin disable` to auto-detect the correct scope when `--scope` is not specified, instead of always defaulting to user scope - Simple mode (`CLAUDE_CODE_SIMPLE`) now includes the file edit tool in addition to the Bash tool, allowing direct file editing in simple mode. - Permission suggestions are now populated when safety checks trigger an ask response, enabling SDK consumers to display permission options - Sonnet 4.5 with 1M context is being removed from the Max plan in favor of our frontier Sonnet 4.6 model, which now has 1M context. Please switch in /model. - Fixed verbose mode not updating thinking block display when toggled via `/config` — memo comparators now correctly detect verbose changes - Fixed unbounded WASM memory growth during long sessions by periodically resetting the tree-sitter parser - Fixed potential rendering issues caused by stale yoga layout references - Improved performance in non-interactive mode (`-p`) by skipping unnecessary API calls during startup - Improved performance by caching authentication failures for HTTP and SSE MCP servers, avoiding repeated connection attempts to servers requiring auth - Fixed unbounded memory growth during long-running sessions caused by Yoga WASM linear memory never shrinking - SDK model info now includes `supportsEffort`, `supportedEffortLevels`, and `supportsAdaptiveThinking` fields so consumers can discover model capabilities. - Added `ConfigChange` hook event that fires when configuration files change during a session, enabling enterprise security auditing and optional blocking of settings changes. 
- Improved startup performance by caching MCP auth failures to avoid redundant connection attempts - Improved startup performance by reducing HTTP calls for analytics token counting - Improved startup performance by batching MCP tool token counting into a single API call - Fixed `disableAllHooks` setting to respect managed settings hierarchy — non-managed settings can no longer disable managed hooks set by policy (#26637) - Fixed `--resume` session picker showing raw XML tags for sessions that start with commands like `/clear`. Now correctly falls through to the session ID fallback. - Improved permission prompts for path safety and working directory blocks to show the reason for the restriction instead of a bare prompt with no context ## 2.1.47 - Fixed FileWriteTool line counting to preserve intentional trailing blank lines instead of stripping them with `trimEnd()`. - Fixed Windows terminal rendering bugs caused by `os.EOL` (`\r\n`) in display code — line counts now show correct values instead of always showing 1 on Windows. - Improved VS Code plan preview: auto-updates as Claude iterates, enables commenting only when the plan is ready for review, and keeps the preview open when rejecting so Claude can revise. - Fixed a bug where bold and colored text in markdown output could shift to the wrong characters on Windows due to `\r\n` line endings. - Fixed compaction failing when conversation contains many PDF documents by stripping document blocks alongside images before sending to the compaction API (anthropics/claude-code#26188) - Improved memory usage in long-running sessions by releasing API stream buffers, agent context, and skill state after use - Improved startup performance by deferring SessionStart hook execution, reducing time-to-interactive by ~500ms. - Fixed an issue where bash tool output was silently discarded on Windows when using MSYS2 or Cygwin shells. 
- Improved performance of `@` file mentions - file suggestions now appear faster by pre-warming the index on startup and using session-based caching with background refresh. - Improved memory usage by trimming agent task message history after tasks complete - Improved memory usage during long agent sessions by eliminating O(n²) message accumulation in progress updates - Fixed the bash permission classifier to validate that returned match descriptions correspond to actual input rules, preventing hallucinated descriptions from incorrectly granting permissions - Fixed user-defined agents only loading one file on NFS/FUSE filesystems that report zero inodes (anthropics/claude-code#26044) - Fixed plugin agent skills silently failing to load when referenced by bare name instead of fully-qualified plugin name (anthropics/claude-code#25834) - Search patterns in collapsed tool results are now displayed in quotes for clarity - Windows: Fixed CWD tracking temp files never being cleaned up, causing them to accumulate indefinitely (anthropics/claude-code#17600) - Use `ctrl+f` to kill all background agents instead of double-pressing ESC. Background agents now continue running when you press ESC to cancel the main thread, giving you more control over agent lifecycle. - Fixed API 400 errors ("thinking blocks cannot be modified") that occurred in sessions with concurrent agents, caused by interleaved streaming content blocks preventing proper message merging. - Simplified teammate navigation to use only Shift+Down (with wrapping) instead of both Shift+Up and Shift+Down. - Fixed an issue where a single file write/edit error would abort all other parallel file write/edit operations. Independent file mutations now complete even when a sibling fails. - Added `last_assistant_message` field to Stop and SubagentStop hook inputs, providing the final assistant response text so hooks can access it without parsing transcript files. 
- Fixed custom session titles set via `/rename` being lost after resuming a conversation (anthropics/claude-code#23610) - Fixed collapsed read/search hint text overflowing on narrow terminals by truncating from the start. - Fixed an issue where bash commands with backslash-newline continuation lines (e.g., long commands split across multiple lines with `\`) would produce spurious empty arguments, potentially breaking command execution. - Fixed built-in slash commands (`/help`, `/model`, `/compact`, etc.) being hidden from the autocomplete dropdown when many user skills are installed (anthropics/claude-code#22020) - Fixed MCP servers not appearing in the MCP Management Dialog after deferred loading - Fixed session name persisting in status bar after `/clear` command (anthropics/claude-code#26082) - Fixed crash when a skill's `name` or `description` in SKILL.md frontmatter is a bare number (e.g., `name: 3000`) — the value is now properly coerced to a string (anthropics/claude-code#25837) - Fixed /resume silently dropping sessions when the first message exceeds 16KB or uses array-format content (anthropics/claude-code#25721) - Added `chat:newline` keybinding action for configurable multi-line input (anthropics/claude-code#26075) - Added `added_dirs` to the statusline JSON `workspace` section, exposing directories added via `/add-dir` to external scripts (anthropics/claude-code#26096) - Fixed `claude doctor` misclassifying mise and asdf-managed installations as native installs (anthropics/claude-code#26033) - Fixed zsh heredoc failing with "read-only file system" error in sandboxed commands (anthropics/claude-code#25990) - Fixed agent progress indicator showing inflated tool use count (anthropics/claude-code#26023) - Fixed image pasting not working on WSL2 systems where Windows copies images as BMP format (anthropics/claude-code#25935) - Fixed background agent results returning raw transcript data instead of the agent's final answer (anthropics/claude-code#26012) - 
Fixed Warp terminal incorrectly prompting for Shift+Enter setup when it supports it natively (anthropics/claude-code#25957) - Fixed CJK wide characters causing misaligned timestamps and layout elements in the TUI (anthropics/claude-code#26084) - Fixed custom agent `model` field in `.claude/agents/*.md` being ignored when spawning team teammates (anthropics/claude-code#26064) - Fixed plan mode being lost after context compaction, causing the model to switch from planning to implementation mode (anthropics/claude-code#26061) - Fixed `alwaysThinkingEnabled: true` in settings.json not enabling thinking mode on Bedrock and Vertex providers (anthropics/claude-code#26074) - Fixed `tool_decision` OTel telemetry event not being emitted in headless/SDK mode (anthropics/claude-code#26059) - Fixed session name being lost after context compaction — renamed sessions now preserve their custom title through compaction (anthropics/claude-code#26121) - Increased initial session count in resume picker from 10 to 50 for faster session discovery (anthropics/claude-code#26123) - Windows: fixed worktree session matching when drive letter casing differs (anthropics/claude-code#26123) - Fixed `/resume ` failing to find sessions whose first message exceeds 16KB (anthropics/claude-code#25920) - Fixed "Always allow" on multiline bash commands creating invalid permission patterns that corrupt settings (anthropics/claude-code#25909) - Fixed React crash (error #31) when a skill's `argument-hint` in SKILL.md frontmatter uses YAML sequence syntax (e.g., `[topic: foo | bar]`) — the value is now properly coerced to a string (anthropics/claude-code#25826) - Fixed crash when using `/fork` on sessions that used web search — null entries in search results from transcript deserialization are now handled gracefully (anthropics/claude-code#25811) - Fixed read-only git commands triggering FSEvents file watcher loops on macOS by adding --no-optional-locks flag (anthropics/claude-code#25750) - Fixed custom 
agents and skills not being discovered when running from a git worktree — project-level `.claude/agents/` and `.claude/skills/` from the main repository are now included (anthropics/claude-code#25816) - Fixed non-interactive subcommands like `claude doctor` and `claude plugin validate` being blocked inside nested Claude sessions (anthropics/claude-code#25803) - Windows: Fixed the same CLAUDE.md file being loaded twice when drive letter casing differs between paths (anthropics/claude-code#25756) - Fixed inline code spans in markdown being incorrectly parsed as bash commands (anthropics/claude-code#25792) - Fixed teammate spinners not respecting custom spinnerVerbs from settings (anthropics/claude-code#25748) - Fixed shell commands permanently failing after a command deletes its own working directory (anthropics/claude-code#26136) - Fixed hooks (PreToolUse, PostToolUse) silently failing to execute on Windows by using Git Bash instead of cmd.exe (anthropics/claude-code#25981) - Fixed LSP `findReferences` and other location-based operations returning results from gitignored files (e.g., `node_modules/`, `venv/`) (anthropics/claude-code#26051) - Moved config backup files from home directory root to `~/.claude/backups/` to reduce home directory clutter (anthropics/claude-code#26130) - Fixed sessions with large first prompts (>16KB) disappearing from the /resume list (anthropics/claude-code#26140) - Fixed shell functions with double-underscore prefixes (e.g., `__git_ps1`) not being preserved across shell sessions (anthropics/claude-code#25824) - Fixed spinner showing "0 tokens" counter before any tokens have been received (anthropics/claude-code#26105) - VSCode: Fixed conversation messages appearing dimmed while the AskUserQuestion dialog is open (anthropics/claude-code#26078) - Fixed background tasks failing in git worktrees due to remote URL resolution reading from worktree-specific gitdir instead of the main repository config (anthropics/claude-code#26065) - Fixed 
Right Alt key leaving visible `[25~` escape sequence residue in the input field on Windows/Git Bash terminals (anthropics/claude-code#25943) - The `/rename` command now updates the terminal tab title by default (anthropics/claude-code#25789) - Fixed Edit tool silently corrupting Unicode curly quotes (\u201c\u201d \u2018\u2019) by replacing them with straight quotes when making edits (anthropics/claude-code#26141) - Fixed OSC 8 hyperlinks only being clickable on the first line when link text wraps across multiple terminal lines. ## 2.1.46 - Fixed orphaned CC processes after terminal disconnect on macOS - Added support for using claude.ai MCP connectors in Claude Code ## 2.1.45 - Added support for Claude Sonnet 4.6 - Added support for reading `enabledPlugins` and `extraKnownMarketplaces` from `--add-dir` directories - Added `spinnerTipsOverride` setting to customize spinner tips — configure `tips` with an array of custom tip strings, and optionally set `excludeDefault: true` to show only your custom tips instead of the built-in ones - Added `SDKRateLimitInfo` and `SDKRateLimitEvent` types to the SDK, enabling consumers to receive rate limit status updates including utilization, reset times, and overage information - Fixed Agent Teams teammates failing on Bedrock, Vertex, and Foundry by propagating API provider environment variables to tmux-spawned processes (anthropics/claude-code#23561) - Fixed sandbox "operation not permitted" errors when writing temporary files on macOS by using the correct per-user temp directory (anthropics/claude-code#21654) - Fixed Task tool (backgrounded agents) crashing with a `ReferenceError` on completion (anthropics/claude-code#22087) - Fixed autocomplete suggestions not being accepted on Enter when images are pasted in the input - Fixed skills invoked by subagents incorrectly appearing in main session context after compaction - Fixed excessive `.claude.json.backup` files accumulating on every startup - Fixed plugin-provided commands, 
agents, and hooks not being available immediately after installation without requiring a restart - Improved startup performance by removing eager loading of session history for stats caching - Improved memory usage for shell commands that produce large output — RSS no longer grows unboundedly with command output size - Improved collapsed read/search groups to show the current file or search pattern being processed beneath the summary line while active - [VSCode] Improved permission destination choice (project/user/session) to persist across sessions ## 2.1.44 - Fixed ENAMETOOLONG errors for deeply-nested directory paths - Fixed auth refresh errors ## 2.1.43 - Fixed AWS auth refresh hanging indefinitely by adding a 3-minute timeout - Fixed spurious warnings for non-agent markdown files in `.claude/agents/` directory - Fixed structured-outputs beta header being sent unconditionally on Vertex/Bedrock ## 2.1.42 - Improved startup performance by deferring Zod schema construction - Improved prompt cache hit rates by moving date out of system prompt - Added one-time Opus 4.6 effort callout for eligible users - Fixed /resume showing interrupt messages as session titles - Fixed image dimension limit errors to suggest /compact ## 2.1.41 - Added guard against launching Claude Code inside another Claude Code session - Fixed Agent Teams using wrong model identifier for Bedrock, Vertex, and Foundry customers - Fixed a crash when MCP tools return image content during streaming - Fixed /resume session previews showing raw XML tags instead of readable command names - Improved model error messages for Bedrock/Vertex/Foundry users with fallback suggestions - Fixed plugin browse showing misleading "Space to Toggle" hint for already-installed plugins - Fixed hook blocking errors (exit code 2) not showing stderr to the user - Added `speed` attribute to OTel events and trace spans for fast mode visibility - Added `claude auth login`, `claude auth status`, and `claude auth logout` CLI 
subcommands - Added Windows ARM64 (win32-arm64) native binary support - Improved `/rename` to auto-generate session name from conversation context when called without arguments - Improved narrow terminal layout for prompt footer - Fixed file resolution failing for @-mentions with anchor fragments (e.g., `@README.md#installation`) - Fixed FileReadTool blocking the process on FIFOs, `/dev/stdin`, and large files - Fixed background task notifications not being delivered in streaming Agent SDK mode - Fixed cursor jumping to end on each keystroke in classifier rule input - Fixed markdown link display text being dropped for raw URLs - Fixed auto-compact failure error notifications being shown to users - Fixed permission wait time being included in subagent elapsed time display - Fixed proactive ticks firing while in plan mode - Fixed stale permission rules not being cleared when settings change on disk - Fixed hook blocking errors showing stderr content in UI ## 2.1.39 - Improved terminal rendering performance - Fixed fatal errors being swallowed instead of displayed - Fixed process hanging after session close - Fixed character loss at terminal screen boundary - Fixed blank lines in verbose transcript view ## 2.1.38 - Fixed VS Code terminal scroll-to-top regression introduced in 2.1.37 - Fixed Tab key queueing slash commands instead of autocompleting - Fixed bash permission matching for commands using environment variable wrappers - Fixed text between tool uses disappearing when not using streaming - Fixed duplicate sessions when resuming in VS Code extension - Improved heredoc delimiter parsing to prevent command smuggling - Blocked writes to `.claude/skills` directory in sandbox mode ## 2.1.37 - Fixed an issue where /fast was not immediately available after enabling /extra-usage ## 2.1.36 - Fast mode is now available for Opus 4.6. 
Learn more at https://code.claude.com/docs/en/fast-mode ## 2.1.34 - Fixed a crash when agent teams setting changed between renders - Fixed a bug where commands excluded from sandboxing (via `sandbox.excludedCommands` or `dangerouslyDisableSandbox`) could bypass the Bash ask permission rule when `autoAllowBashIfSandboxed` was enabled ## 2.1.33 - Fixed agent teammate sessions in tmux to send and receive messages - Fixed warnings about agent teams not being available on your current plan - Added `TeammateIdle` and `TaskCompleted` hook events for multi-agent workflows - Added support for restricting which sub-agents can be spawned via `Task(agent_type)` syntax in agent "tools" frontmatter - Added `memory` frontmatter field support for agents, enabling persistent memory with `user`, `project`, or `local` scope - Added plugin name to skill descriptions and `/skills` menu for better discoverability - Fixed an issue where submitting a new message while the model was in extended thinking would interrupt the thinking phase - Fixed an API error that could occur when aborting mid-stream, where whitespace text combined with a thinking block would bypass normalization and produce an invalid request - Fixed API proxy compatibility issue where 404 errors on streaming endpoints no longer triggered non-streaming fallback - Fixed an issue where proxy settings configured via `settings.json` environment variables were not applied to WebFetch and other HTTP requests on the Node.js build - Fixed `/resume` session picker showing raw XML markup instead of clean titles for sessions started with slash commands - Improved error messages for API connection failures — now shows specific cause (e.g., ECONNREFUSED, SSL errors) instead of generic "Connection error" - Errors from invalid managed settings are now surfaced - VSCode: Added support for remote sessions, allowing OAuth users to browse and resume sessions from claude.ai - VSCode: Added git branch and message count to the session picker, 
with support for searching by branch name - VSCode: Fixed scroll-to-bottom under-scrolling on initial session load and session switch ## 2.1.32 - Claude Opus 4.6 is now available! - Added research preview agent teams feature for multi-agent collaboration (token-intensive feature, requires setting CLAUDE_CODE_EXPERIMENTAL_AGENT_TEAMS=1) - Claude now automatically records and recalls memories as it works - Added "Summarize from here" to the message selector, allowing partial conversation summarization. - Skills defined in `.claude/skills/` within additional directories (`--add-dir`) are now loaded automatically. - Fixed `@` file completion showing incorrect relative paths when running from a subdirectory - Updated --resume to re-use --agent value specified in previous conversation by default. - Fixed: Bash tool no longer throws "Bad substitution" errors when heredocs contain JavaScript template literals like `${index + 1}`, which previously interrupted tool execution - Skill character budget now scales with context window (2% of context), so users with larger context windows can see more skill descriptions without truncation - Fixed Thai/Lao spacing vowels (สระ า, ำ) not rendering correctly in the input field - VSCode: Fixed slash commands incorrectly being executed when pressing Enter with preceding text in the input field - VSCode: Added spinner when loading past conversations list ## 2.1.31 - Added session resume hint on exit, showing how to continue your conversation later - Added support for full-width (zenkaku) space input from Japanese IME in checkbox selection - Fixed PDF too large errors permanently locking up sessions, requiring users to start a new conversation - Fixed bash commands incorrectly reporting failure with "Read-only file system" errors when sandbox mode was enabled - Fixed a crash that made sessions unusable after entering plan mode when project config in `~/.claude.json` was missing default fields - Fixed `temperatureOverride` being silently 
ignored in the streaming API path, causing all streaming requests to use the default temperature (1) regardless of the configured override - Fixed LSP shutdown/exit compatibility with strict language servers that reject null params - Improved system prompts to more clearly guide the model toward using dedicated tools (Read, Edit, Glob, Grep) instead of bash equivalents (`cat`, `sed`, `grep`, `find`), reducing unnecessary bash command usage - Improved PDF and request size error messages to show actual limits (100 pages, 20MB) - Reduced layout jitter in the terminal when the spinner appears and disappears during streaming - Removed misleading Anthropic API pricing from model selector for third-party provider (Bedrock, Vertex, Foundry) users ## 2.1.30 - Added `pages` parameter to the Read tool for PDFs, allowing specific page ranges to be read (e.g., `pages: "1-5"`). Large PDFs (>10 pages) now return a lightweight reference when `@` mentioned instead of being inlined into context. - Added pre-configured OAuth client credentials for MCP servers that don't support Dynamic Client Registration (e.g., Slack). Use `--client-id` and `--client-secret` with `claude mcp add`. 
- Added `/debug` for Claude to help troubleshoot the current session - Added support for additional `git log` and `git show` flags in read-only mode (e.g., `--topo-order`, `--cherry-pick`, `--format`, `--raw`) - Added token count, tool uses, and duration metrics to Task tool results - Added reduced motion mode to the config - Fixed phantom "(no content)" text blocks appearing in API conversation history, reducing token waste and potential model confusion - Fixed prompt cache not correctly invalidating when tool descriptions or input schemas changed, only when tool names changed - Fixed 400 errors that could occur after running `/login` when the conversation contained thinking blocks - Fixed a hang when resuming sessions with corrupted transcript files containing `parentUuid` cycles - Fixed rate limit message showing incorrect "/upgrade" suggestion for Max 20x users when extra-usage is unavailable - Fixed permission dialogs stealing focus while actively typing - Fixed subagents not being able to access SDK-provided MCP tools because they were not synced to the shared application state - Fixed a regression where Windows users with a `.bashrc` file could not run bash commands - Improved memory usage for `--resume` (68% reduction for users with many sessions) by replacing the session index with lightweight stat-based loading and progressive enrichment - Improved `TaskStop` tool to display the stopped command/task description in the result line instead of a generic "Task stopped" message - Changed `/model` to execute immediately instead of being queued - [VSCode] Added multiline input support to the "Other" text input in question dialogs (use Shift+Enter for new lines) - [VSCode] Fixed duplicate sessions appearing in the session list when starting a new conversation ## 2.1.29 - Fixed startup performance issues when resuming sessions that have `saved_hook_context` ## 2.1.27 - Added tool call failures and denials to debug logs - Fixed context management validation error 
for gateway users, ensuring `CLAUDE_CODE_DISABLE_EXPERIMENTAL_BETAS=1` avoids the error - Added `--from-pr` flag to resume sessions linked to a specific GitHub PR number or URL - Sessions are now automatically linked to PRs when created via `gh pr create` - Fixed /context command not displaying colored output - Fixed status bar duplicating background task indicator when PR status was shown - Windows: Fixed bash command execution failing for users with `.bashrc` files - Windows: Fixed console windows flashing when spawning child processes - VSCode: Fixed OAuth token expiration causing 401 errors after extended sessions ## 2.1.25 - Fixed beta header validation error for gateway users on Bedrock and Vertex, ensuring `CLAUDE_CODE_DISABLE_EXPERIMENTAL_BETAS=1` avoids the error ## 2.1.23 - Added customizable spinner verbs setting (`spinnerVerbs`) - Fixed mTLS and proxy connectivity for users behind corporate proxies or using client certificates - Fixed per-user temp directory isolation to prevent permission conflicts on shared systems - Fixed a race condition that could cause 400 errors when prompt caching scope was enabled - Fixed pending async hooks not being cancelled when headless streaming sessions ended - Fixed tab completion not updating the input field when accepting a suggestion - Fixed ripgrep search timeouts silently returning empty results instead of reporting errors - Improved terminal rendering performance with optimized screen data layout - Changed Bash commands to show timeout duration alongside elapsed time - Changed merged pull requests to show a purple status indicator in the prompt footer - [IDE] Fixed model options displaying incorrect region strings for Bedrock users in headless mode ## 2.1.22 - Fixed structured outputs for non-interactive (-p) mode ## 2.1.21 - Added support for full-width (zenkaku) number input from Japanese IME in option selection prompts - Fixed shell completion cache files being truncated on exit - Fixed API errors when resuming 
sessions that were interrupted during tool execution - Fixed auto-compact triggering too early on models with large output token limits - Fixed task IDs potentially being reused after deletion - Fixed file search not working in VS Code extension on Windows - Improved read/search progress indicators to show "Reading…" while in progress and "Read" when complete - Improved Claude to prefer file operation tools (Read, Edit, Write) over bash equivalents (cat, sed, awk) - [VSCode] Added automatic Python virtual environment activation, ensuring `python` and `pip` commands use the correct interpreter (configurable via `claudeCode.usePythonEnvironment` setting) - [VSCode] Fixed message action buttons having incorrect background colors ## 2.1.20 - Added arrow key history navigation in vim normal mode when cursor cannot move further - Added external editor shortcut (Ctrl+G) to the help menu for better discoverability - Added PR review status indicator to the prompt footer, showing the current branch's PR state (approved, changes requested, pending, or draft) as a colored dot with a clickable link - Added support for loading `CLAUDE.md` files from additional directories specified via `--add-dir` flag (requires setting `CLAUDE_CODE_ADDITIONAL_DIRECTORIES_CLAUDE_MD=1`) - Added ability to delete tasks via the `TaskUpdate` tool - Fixed session compaction issues that could cause resume to load full history instead of the compact summary - Fixed agents sometimes ignoring user messages sent while actively working on a task - Fixed wide character (emoji, CJK) rendering artifacts where trailing columns were not cleared when replaced by narrower characters - Fixed JSON parsing errors when MCP tool responses contain special Unicode characters - Fixed up/down arrow keys in multi-line and wrapped text input to prioritize cursor movement over history navigation - Fixed draft prompt being lost when pressing UP arrow to navigate command history - Fixed ghost text flickering when typing slash 
commands mid-input - Fixed marketplace source removal not properly deleting settings - Fixed duplicate output in some commands like `/context` - Fixed task list sometimes showing outside the main conversation view - Fixed syntax highlighting for diffs occurring within multiline constructs like Python docstrings - Fixed crashes when cancelling tool use - Improved `/sandbox` command UI to show dependency status with installation instructions when dependencies are missing - Improved thinking status text with a subtle shimmer animation - Improved task list to dynamically adjust visible items based on terminal height - Improved fork conversation hint to show how to resume the original session - Changed collapsed read/search groups to show present tense ("Reading", "Searching for") while in progress, and past tense ("Read", "Searched for") when complete - Changed `ToolSearch` results to appear as a brief notification instead of inline in the conversation - Changed the `/commit-push-pr` skill to automatically post PR URLs to Slack channels when configured via MCP tools - Changed the `/copy` command to be available to all users - Changed background agents to prompt for tool permissions before launching - Changed permission rules like `Bash(*)` to be accepted and treated as equivalent to `Bash` - Changed config backups to be timestamped and rotated (keeping 5 most recent) to prevent data loss ## 2.1.19 - Added env var `CLAUDE_CODE_ENABLE_TASKS`, set to `false` to keep the old system temporarily - Added shorthand `$0`, `$1`, etc. 
for accessing individual arguments in custom commands - Fixed crashes on processors without AVX instruction support - Fixed dangling Claude Code processes when terminal is closed by catching EIO errors from `process.exit()` and using SIGKILL as fallback - Fixed `/rename` and `/tag` not updating the correct session when resuming from a different directory (e.g., git worktrees) - Fixed resuming sessions by custom title not working when run from a different directory - Fixed pasted text content being lost when using prompt stash (Ctrl+S) and restore - Fixed agent list displaying "Sonnet (default)" instead of "Inherit (default)" for agents without an explicit model setting - Fixed backgrounded hook commands not returning early, potentially causing the session to wait on a process that was intentionally backgrounded - Fixed file write preview omitting empty lines - Changed skills without additional permissions or hooks to be allowed without requiring approval - Changed indexed argument syntax from `$ARGUMENTS.0` to `$ARGUMENTS[0]` (bracket syntax) - [SDK] Added replay of `queued_command` attachment messages as `SDKUserMessageReplay` events when `replayUserMessages` is enabled - [VSCode] Enabled session forking and rewind functionality for all users ## 2.1.18 - Added customizable keyboard shortcuts. Configure keybindings per context, create chord sequences, and personalize your workflow. Run `/keybindings` to get started. 
Learn more at https://code.claude.com/docs/en/keybindings ## 2.1.17 - Fixed crashes on processors without AVX instruction support ## 2.1.16 - Added new task management system, including new capabilities like dependency tracking - [VSCode] Added native plugin management support - [VSCode] Added ability for OAuth users to browse and resume remote Claude sessions from the Sessions dialog - Fixed out-of-memory crashes when resuming sessions with heavy subagent usage - Fixed an issue where the "context remaining" warning was not hidden after running `/compact` - Fixed session titles on the resume screen not respecting the user's language setting - [IDE] Fixed a race condition on Windows where the Claude Code sidebar view container would not appear on start ## 2.1.15 - Added deprecation notification for npm installations - run `claude install` or see https://docs.anthropic.com/en/docs/claude-code/getting-started for more options - Improved UI rendering performance with React Compiler - Fixed the "Context left until auto-compact" warning not disappearing after running `/compact` - Fixed MCP stdio server timeout not killing child process, which could cause UI freezes ## 2.1.14 - Added history-based autocomplete in bash mode (`!`) - type a partial command and press Tab to complete from your bash command history - Added search to installed plugins list - type to filter by name or description - Added support for pinning plugins to specific git commit SHAs, allowing marketplace entries to install exact versions - Fixed a regression where the context window blocking limit was calculated too aggressively, blocking users at ~65% context usage instead of the intended ~98% - Fixed memory issues that could cause crashes when running parallel subagents - Fixed memory leak in long-running sessions where stream resources were not cleaned up after shell commands completed - Fixed `@` symbol incorrectly triggering file autocomplete suggestions in bash mode - Fixed `@`-mention menu folder 
click behavior to navigate into directories instead of selecting them - Fixed `/feedback` command generating invalid GitHub issue URLs when description is very long - Fixed `/context` command to show the same token count and percentage as the status line in verbose mode - Fixed an issue where `/config`, `/context`, `/model`, and `/todos` command overlays could close unexpectedly - Fixed slash command autocomplete selecting wrong command when typing similar commands (e.g., `/context` vs `/compact`) - Fixed inconsistent back navigation in plugin marketplace when only one marketplace is configured - Fixed iTerm2 progress bar not clearing properly on exit, preventing lingering indicators and bell sounds - Improved backspace to delete pasted text as a single token instead of one character at a time - [VSCode] Added `/usage` command to display current plan usage ## 2.1.12 - Fixed message rendering bug ## 2.1.11 - Fixed excessive MCP connection requests for HTTP/SSE transports ## 2.1.10 - Added new `Setup` hook event that can be triggered via `--init`, `--init-only`, or `--maintenance` CLI flags for repository setup and maintenance operations - Added keyboard shortcut 'c' to copy OAuth URL when browser doesn't open automatically during login - Fixed a crash when running bash commands containing heredocs with JavaScript template literals like `${index + 1}` - Improved startup to capture keystrokes typed before the REPL is fully ready - Improved file suggestions to show as removable attachments instead of inserting text when accepted - [VSCode] Added install count display to plugin listings - [VSCode] Added trust warning when installing plugins ## 2.1.9 - Added `auto:N` syntax for configuring the MCP tool search auto-enable threshold, where N is the context window percentage (0-100) - Added `plansDirectory` setting to customize where plan files are stored - Added external editor support (Ctrl+G) in AskUserQuestion "Other" input field - Added session URL attribution to 
commits and PRs created from web sessions - Added support for `PreToolUse` hooks to return `additionalContext` to the model - Added `${CLAUDE_SESSION_ID}` string substitution for skills to access the current session ID - Fixed long sessions with parallel tool calls failing with an API error about orphan tool_result blocks - Fixed MCP server reconnection hanging when cached connection promise never resolves - Fixed Ctrl+Z suspend not working in terminals using Kitty keyboard protocol (Ghostty, iTerm2, kitty, WezTerm) ## 2.1.7 - Added `showTurnDuration` setting to hide turn duration messages (e.g., "Cooked for 1m 6s") - Added ability to provide feedback when accepting permission prompts - Added inline display of agent's final response in task notifications, making it easier to see results without reading the full transcript file - Fixed security vulnerability where wildcard permission rules could match compound commands containing shell operators - Fixed false "file modified" errors on Windows when cloud sync tools, antivirus scanners, or Git touch file timestamps without changing content - Fixed orphaned tool_result errors when sibling tools fail during streaming execution - Fixed context window blocking limit being calculated using the full context window instead of the effective context window (which reserves space for max output tokens) - Fixed spinner briefly flashing when running local slash commands like `/model` or `/theme` - Fixed terminal title animation jitter by using fixed-width braille characters - Fixed plugins with git submodules not being fully initialized when installed - Fixed bash commands failing on Windows when temp directory paths contained characters like `\t` or `\n` that were misinterpreted as escape sequences - Improved typing responsiveness by reducing memory allocation overhead in terminal rendering - Enabled MCP tool search auto mode by default for all users. 
When MCP tool descriptions exceed 10% of the context window, they are automatically deferred and discovered via the MCPSearch tool instead of being loaded upfront. This reduces context usage for users with many MCP tools configured. Users can disable this by adding `MCPSearch` to `disallowedTools` in their settings. - Changed OAuth and API Console URLs from console.anthropic.com to platform.claude.com - [VSCode] Fixed `claudeProcessWrapper` setting passing the wrapper path instead of the Claude binary path ## 2.1.6 - Added search functionality to `/config` command for quickly filtering settings - Added Updates section to `/doctor` showing auto-update channel and available npm versions (stable/latest) - Added date range filtering to `/stats` command - press `r` to cycle between Last 7 days, Last 30 days, and All time - Added automatic discovery of skills from nested `.claude/skills` directories when working with files in subdirectories - Added `context_window.used_percentage` and `context_window.remaining_percentage` fields to status line input for easier context window display - Added an error display when the editor fails during Ctrl+G - Fixed permission bypass via shell line continuation that could allow blocked commands to execute - Fixed false "File has been unexpectedly modified" errors when file watchers touch files without changing content - Fixed text styling (bold, colors) getting progressively misaligned in multi-line responses - Fixed the feedback panel closing unexpectedly when typing 'n' in the description field - Fixed rate limit warning appearing at low usage after weekly reset (now requires 70% usage) - Fixed rate limit options menu incorrectly auto-opening when resuming a previous session - Fixed numpad keys outputting escape sequences instead of characters in Kitty keyboard protocol terminals - Fixed Option+Return not inserting newlines in Kitty keyboard protocol terminals - Fixed corrupted config backup files accumulating in the home directory 
(now only one backup is created per config file) - Fixed `mcp list` and `mcp get` commands leaving orphaned MCP server processes - Fixed visual artifacts in ink2 mode when nodes become hidden via `display:none` - Improved the external CLAUDE.md imports approval dialog to show which files are being imported and from where - Improved the `/tasks` dialog to go directly to task details when there's only one background task running - Improved @ autocomplete with icons for different suggestion types and single-line formatting - Updated "Help improve Claude" setting fetch to refresh OAuth and retry when it fails due to a stale OAuth token - Changed task notification display to cap at 3 lines with overflow summary when multiple background tasks complete simultaneously - Changed terminal title to "Claude Code" on startup for better window identification - Removed ability to @-mention MCP servers to enable/disable - use `/mcp enable ` instead - [VSCode] Fixed usage indicator not updating after manual compact ## 2.1.5 - Added `CLAUDE_CODE_TMPDIR` environment variable to override the temp directory used for internal temp files, useful for environments with custom temp directory requirements ## 2.1.4 - Added `CLAUDE_CODE_DISABLE_BACKGROUND_TASKS` environment variable to disable all background task functionality including auto-backgrounding and the Ctrl+B shortcut - Fixed "Help improve Claude" setting fetch to refresh OAuth and retry when it fails due to a stale OAuth token ## 2.1.3 - Merged slash commands and skills, simplifying the mental model with no change in behavior - Added release channel (`stable` or `latest`) toggle to `/config` - Added detection and warnings for unreachable permission rules, with warnings in `/doctor` and after saving rules that include the source of each rule and actionable fix guidance - Fixed plan files persisting across `/clear` commands, now ensuring a fresh plan file is used after clearing a conversation - Fixed false skill duplicate detection 
on filesystems with large inodes (e.g., ExFAT) by using 64-bit precision for inode values - Fixed mismatch between background task count in status bar and items shown in tasks dialog - Fixed sub-agents using the wrong model during conversation compaction - Fixed web search in sub-agents using incorrect model - Fixed trust dialog acceptance when running from the home directory not enabling trust-requiring features like hooks during the session - Improved terminal rendering stability by preventing uncontrolled writes from corrupting cursor state - Improved slash command suggestion readability by truncating long descriptions to 2 lines - Changed tool hook execution timeout from 60 seconds to 10 minutes - [VSCode] Added clickable destination selector for permission requests, allowing you to choose where settings are saved (this project, all projects, shared with team, or session only) ## 2.1.2 - Added source path metadata to images dragged onto the terminal, helping Claude understand where images originated - Added clickable hyperlinks for file paths in tool output in terminals that support OSC 8 (like iTerm) - Added support for Windows Package Manager (winget) installations with automatic detection and update instructions - Added Shift+Tab keyboard shortcut in plan mode to quickly select "auto-accept edits" option - Added `FORCE_AUTOUPDATE_PLUGINS` environment variable to allow plugin autoupdate even when the main auto-updater is disabled - Added `agent_type` to SessionStart hook input, populated if `--agent` is specified - Fixed a command injection vulnerability in bash command processing where malformed input could execute arbitrary commands - Fixed a memory leak where tree-sitter parse trees were not being freed, causing WASM memory to grow unbounded over long sessions - Fixed binary files (images, PDFs, etc.) 
being accidentally included in memory when using `@include` directives in CLAUDE.md files - Fixed updates incorrectly claiming another installation is in progress - Fixed crash when socket files exist in watched directories (defense-in-depth for EOPNOTSUPP errors) - Fixed remote session URL and teleport being broken when using `/tasks` command - Fixed MCP tool names being exposed in analytics events by sanitizing user-specific server configurations - Improved Option-as-Meta hint on macOS to show terminal-specific instructions for native CSIu terminals like iTerm2, Kitty, and WezTerm - Improved error message when pasting images over SSH to suggest using `scp` instead of the unhelpful clipboard shortcut hint - Improved permission explainer to not flag routine dev workflows (git fetch/rebase, npm install, tests, PRs) as medium risk - Changed large bash command outputs to be saved to disk instead of truncated, allowing Claude to read the full content - Changed large tool outputs to be persisted to disk instead of truncated, providing full output access via file references - Changed `/plugins` installed tab to unify plugins and MCPs with scope-based grouping - Deprecated Windows managed settings path `C:\ProgramData\ClaudeCode\managed-settings.json` - administrators should migrate to `C:\Program Files\ClaudeCode\managed-settings.json` - [SDK] Changed minimum zod peer dependency to ^4.0.0 - [VSCode] Fixed usage display not updating after manual compact ## 2.1.0 - Added automatic skill hot-reload - skills created or modified in `~/.claude/skills` or `.claude/skills` are now immediately available without restarting the session - Added support for running skills and slash commands in a forked sub-agent context using `context: fork` in skill frontmatter - Added support for `agent` field in skills to specify agent type for execution - Added `language` setting to configure Claude's response language (e.g., language: "japanese") - Changed Shift+Enter to work out of the box in 
iTerm2, WezTerm, Ghostty, and Kitty without modifying terminal configs - Added `respectGitignore` support in `settings.json` for per-project control over @-mention file picker behavior - Added `IS_DEMO` environment variable to hide email and organization from the UI, useful for streaming or recording sessions - Fixed security issue where sensitive data (OAuth tokens, API keys, passwords) could be exposed in debug logs - Fixed files and skills not being properly discovered when resuming sessions with `-c` or `--resume` - Fixed pasted content being lost when replaying prompts from history using up arrow or Ctrl+R search - Fixed Esc key with queued prompts to only move them to input without canceling the running task - Reduced permission prompts for complex bash commands - Fixed command search to prioritize exact and prefix matches on command names over fuzzy matches in descriptions - Fixed PreToolUse hooks to allow `updatedInput` when returning `ask` permission decision, enabling hooks to act as middleware while still requesting user consent - Fixed plugin path resolution for file-based marketplace sources - Fixed LSP tool being incorrectly enabled when no LSP servers were configured - Fixed background tasks failing with "git repository not found" error for repositories with dots in their names - Fixed Claude in Chrome support for WSL environments - Fixed Windows native installer silently failing when executable creation fails - Improved CLI help output to display options and subcommands in alphabetical order for easier navigation - Added wildcard pattern matching for Bash tool permissions using `*` at any position in rules (e.g., `Bash(npm *)`, `Bash(* install)`, `Bash(git * main)`) - Added unified Ctrl+B backgrounding for both bash commands and agents - pressing Ctrl+B now backgrounds all running foreground tasks simultaneously - Added support for MCP `list_changed` notifications, allowing MCP servers to dynamically update their available tools, prompts, and 
resources without requiring reconnection - Added `/teleport` and `/remote-env` slash commands for claude.ai subscribers, allowing them to resume and configure remote sessions - Added support for disabling specific agents using `Task(AgentName)` syntax in settings.json permissions or the `--disallowedTools` CLI flag - Added hooks support to agent frontmatter, allowing agents to define PreToolUse, PostToolUse, and Stop hooks scoped to the agent's lifecycle - Added hooks support for skill and slash command frontmatter - Added new Vim motions: `;` and `,` to repeat f/F/t/T motions, `y` operator for yank with `yy`/`Y`, `p`/`P` for paste, text objects (`iw`, `aw`, `iW`, `aW`, `i"`, `a"`, `i'`, `a'`, `i(`, `a(`, `i[`, `a[`, `i{`, `a{`), `>>` and `<<` for indent/dedent, and `J` to join lines - Added `/plan` command shortcut to enable plan mode directly from the prompt - Added slash command autocomplete support when `/` appears anywhere in input, not just at the beginning - Added `--tools` flag support in interactive mode to restrict which built-in tools Claude can use during interactive sessions - Added `CLAUDE_CODE_FILE_READ_MAX_OUTPUT_TOKENS` environment variable to override the default file read token limit - Added support for `once: true` config for hooks - Added support for YAML-style lists in frontmatter `allowed-tools` field for cleaner skill declarations - Added support for prompt and agent hook types from plugins (previously only command hooks were supported) - Added Cmd+V support for image paste in iTerm2 (maps to Ctrl+V) - Added left/right arrow key navigation for cycling through tabs in dialogs - Added real-time thinking block display in Ctrl+O transcript mode - Added filepath to full output in background bash task details dialog - Added Skills as a separate category in the context visualization - Fixed OAuth token refresh not triggering when server reports token expired but local expiration check disagrees - Fixed session persistence getting stuck after 
transient server errors by recovering from 409 conflicts when the entry was actually stored - Fixed session resume failures caused by orphaned tool results during concurrent tool execution - Fixed a race condition where stale OAuth tokens could be read from the keychain cache during concurrent token refresh attempts - Fixed AWS Bedrock subagents not inheriting EU/APAC cross-region inference model configuration, causing 403 errors when IAM permissions are scoped to specific regions - Fixed API context overflow when background tasks produce large output by truncating to 30K chars with file path reference - Fixed a hang when reading FIFO files by skipping symlink resolution for special file types - Fixed terminal keyboard mode not being reset on exit in Ghostty, iTerm2, Kitty, and WezTerm - Fixed Alt+B and Alt+F (word navigation) not working in iTerm2, Ghostty, Kitty, and WezTerm - Fixed `${CLAUDE_PLUGIN_ROOT}` not being substituted in plugin `allowed-tools` frontmatter, which caused tools to incorrectly require approval - Fixed files created by the Write tool using hardcoded 0o600 permissions instead of respecting the system umask - Fixed commands with `$()` command substitution failing with parse errors - Fixed multi-line bash commands with backslash continuations being incorrectly split and flagged for permissions - Fixed bash command prefix extraction to correctly identify subcommands after global options (e.g., `git -C /path log` now correctly matches `Bash(git log:*)` rules) - Fixed slash commands passed as CLI arguments (e.g., `claude /context`) not being executed properly - Fixed pressing Enter after Tab-completing a slash command selecting a different command instead of submitting the completed one - Fixed slash command argument hint flickering and inconsistent display when typing commands with arguments - Fixed Claude sometimes redundantly invoking the Skill tool when running slash commands directly - Fixed skill token estimates in `/context` to accurately 
reflect frontmatter-only loading - Fixed subagents sometimes not inheriting the parent's model by default - Fixed model picker showing incorrect selection for Bedrock/Vertex users using `--model haiku` - Fixed duplicate Bash commands appearing in permission request option labels - Fixed noisy output when background tasks complete - now shows clean completion message instead of raw output - Fixed background task completion notifications to appear proactively with bullet point - Fixed forked slash commands showing "AbortError" instead of "Interrupted" message when cancelled - Fixed cursor disappearing after dismissing permission dialogs - Fixed `/hooks` menu selecting wrong hook type when scrolling to a different option - Fixed images in queued prompts showing as "[object Object]" when pressing Esc to cancel - Fixed images being silently dropped when queueing messages while backgrounding a task - Fixed large pasted images failing with "Image was too large" error - Fixed extra blank lines in multiline prompts containing CJK characters (Japanese, Chinese, Korean) - Fixed ultrathink keyword highlighting being applied to wrong characters when user prompt text wraps to multiple lines - Fixed collapsed "Reading X files…" indicator incorrectly switching to past tense when thinking blocks appear mid-stream - Fixed Bash read commands (like `ls` and `cat`) not being counted in collapsed read/search groups, causing groups to incorrectly show "Read 0 files" - Fixed spinner token counter to properly accumulate tokens from subagents during execution - Fixed memory leak in git diff parsing where sliced strings retained large parent strings - Fixed race condition where LSP tool could return "no server available" during startup - Fixed feedback submission hanging indefinitely when network requests timeout - Fixed search mode in plugin discovery and log selector views exiting when pressing up arrow - Fixed hook success message showing trailing colon when hook has no output - Multiple 
optimizations to improve startup performance - Improved terminal rendering performance when using native installer or Bun, especially for text with emoji, ANSI codes, and Unicode characters - Improved performance when reading Jupyter notebooks with many cells - Improved reliability for piped input like `cat refactor.md | claude` - Improved reliability for AskQuestion tool - Improved sed in-place edit commands to render as file edits with diff preview - Improved Claude to automatically continue when response is cut off due to output token limit, instead of showing an error message - Improved compaction reliability - Improved subagents (Task tool) to continue working after permission denial, allowing them to try alternative approaches - Improved skills to show progress while executing, displaying tool uses as they happen - Improved skills from `/skills/` directories to be visible in the slash command menu by default (opt-out with `user-invocable: false` in frontmatter) - Improved skill suggestions to prioritize recently and frequently used skills - Improved spinner feedback when waiting for the first response token - Improved token count display in spinner to include tokens from background agents - Improved incremental output for async agents to give the main thread more control and visibility - Improved permission prompt UX with Tab hint moved to footer, cleaner Yes/No input labels with contextual placeholders - Improved the Claude in Chrome notification with shortened help text and persistent display until dismissed - Improved macOS screenshot paste reliability with TIFF format support - Improved `/stats` output - Updated Atlassian MCP integration to use a more reliable default configuration (streamable HTTP) - Changed "Interrupted" message color from red to grey for a less alarming appearance - Removed permission prompt when entering plan mode - users can now enter plan mode without approval - Removed underline styling from image reference links - [SDK] Changed 
minimum zod peer dependency to ^4.0.0 - [VSCode] Added currently selected model name to the context menu - [VSCode] Added descriptive labels on auto-accept permission button (e.g., "Yes, allow npm for this project" instead of "Yes, and don't ask again") - [VSCode] Fixed paragraph breaks not rendering in markdown content - [VSCode] Fixed scrolling in the extension inadvertently scrolling the parent iframe - [Windows] Fixed issue with improper rendering ## 2.0.76 - Fixed issue with macOS code-sign warning when using Claude in Chrome integration ## 2.0.75 - Minor bugfixes ## 2.0.74 - Added LSP (Language Server Protocol) tool for code intelligence features like go-to-definition, find references, and hover documentation - Added `/terminal-setup` support for Kitty, Alacritty, Zed, and Warp terminals - Added ctrl+t shortcut in `/theme` to toggle syntax highlighting on/off - Added syntax highlighting info to theme picker - Added guidance for macOS users when Alt shortcuts fail due to terminal configuration - Fixed skill `allowed-tools` not being applied to tools invoked by the skill - Fixed Opus 4.5 tip incorrectly showing when user was already using Opus - Fixed a potential crash when syntax highlighting isn't initialized correctly - Fixed visual bug in `/plugins discover` where list selection indicator showed while search box was focused - Fixed macOS keyboard shortcuts to display 'opt' instead of 'alt' - Improved `/context` command visualization with grouped skills and agents by source, slash commands, and sorted token count - [Windows] Fixed issue with improper rendering - [VSCode] Added gift tag pictogram for year-end promotion message ## 2.0.73 - Added clickable `[Image #N]` links that open attached images in the default viewer - Added alt-y yank-pop to cycle through kill ring history after ctrl-y yank - Added search filtering to the plugin discover screen (type to filter by name, description, or marketplace) - Added support for custom session IDs when forking 
sessions with `--session-id` combined with `--resume` or `--continue` and `--fork-session` - Fixed slow input history cycling and race condition that could overwrite text after message submission - Improved `/theme` command to open theme picker directly - Improved theme picker UI - Improved search UX across resume session, permissions, and plugins screens with a unified SearchBox component - [VSCode] Added tab icon badges showing pending permissions (blue) and unread completions (orange) ## 2.0.72 - Added Claude in Chrome (Beta) feature that works with the Chrome extension (https://claude.ai/chrome) to let you control your browser directly from Claude Code - Reduced terminal flickering - Added scannable QR code to mobile app tip for quick app downloads - Added loading indicator when resuming conversations for better feedback - Fixed `/context` command not respecting custom system prompts in non-interactive mode - Fixed order of consecutive Ctrl+K lines when pasting with Ctrl+Y - Improved @ mention file suggestion speed (~3× faster in git repositories) - Improved file suggestion performance in repos with `.ignore` or `.rgignore` files - Improved settings validation errors to be more prominent - Changed thinking toggle from Tab to Alt+T to avoid accidental triggers ## 2.0.71 - Added /config toggle to enable/disable prompt suggestions - Added `/settings` as an alias for the `/config` command - Fixed @ file reference suggestions incorrectly triggering when cursor is in the middle of a path - Fixed MCP servers from `.mcp.json` not loading when using `--dangerously-skip-permissions` - Fixed permission rules incorrectly rejecting valid bash commands containing shell glob patterns (e.g., `ls *.txt`, `for f in *.png`) - Bedrock: Environment variable `ANTHROPIC_BEDROCK_BASE_URL` is now respected for token counting and inference profile listing - New syntax highlighting engine for native build ## 2.0.70 - Added Enter key to accept and submit prompt suggestions immediately 
(tab still accepts for editing) - Added wildcard syntax `mcp__server__*` for MCP tool permissions to allow or deny all tools from a server - Added auto-update toggle for plugin marketplaces, allowing per-marketplace control over automatic updates - Added `current_usage` field to status line input, enabling accurate context window percentage calculations - Fixed input being cleared when processing queued commands while the user was typing - Fixed prompt suggestions replacing typed input when pressing Tab - Fixed diff view not updating when terminal is resized - Improved memory usage by 3x for large conversations - Improved resolution of stats screenshots copied to clipboard (Ctrl+S) for crisper images - Removed # shortcut for quick memory entry (tell Claude to edit your CLAUDE.md instead) - Fix thinking mode toggle in /config not persisting correctly - Improve UI for file creation permission dialog ## 2.0.69 - Minor bugfixes ## 2.0.68 - Fixed IME (Input Method Editor) support for languages like Chinese, Japanese, and Korean by correctly positioning the composition window at the cursor - Fixed a bug where disallowed MCP tools were visible to the model - Fixed an issue where steering messages could be lost while a subagent is working - Fixed Option+Arrow word navigation treating entire CJK (Chinese, Japanese, Korean) text sequences as a single word instead of navigating by word boundaries - Improved plan mode exit UX: show simplified yes/no dialog when exiting with empty or missing plan instead of throwing an error - Add support for enterprise managed settings. Contact your Anthropic account team to enable this feature. 
## 2.0.67 - Thinking mode is now enabled by default for Opus 4.5 - Thinking mode configuration has moved to /config - Added search functionality to `/permissions` command with `/` keyboard shortcut for filtering rules by tool name - Show reason why autoupdater is disabled in `/doctor` - Fixed false "Another process is currently updating Claude" error when running `claude update` while another instance is already on the latest version - Fixed MCP servers from `.mcp.json` being stuck in pending state when running in non-interactive mode (`-p` flag or piped input) - Fixed scroll position resetting after deleting a permission rule in `/permissions` - Fixed word deletion (opt+delete) and word navigation (opt+arrow) not working correctly with non-Latin text such as Cyrillic, Greek, Arabic, Hebrew, Thai, and Chinese - Fixed `claude install --force` not bypassing stale lock files - Fixed consecutive @~/ file references in CLAUDE.md being incorrectly parsed due to markdown strikethrough interference - Windows: Fixed plugin MCP servers failing due to colons in log directory paths ## 2.0.65 - Added ability to switch models while writing a prompt using alt+p (linux, windows), option+p (macos). 
- Added context window information to status line input - Added `fileSuggestion` setting for custom `@` file search commands - Added `CLAUDE_CODE_SHELL` environment variable to override automatic shell detection (useful when login shell differs from actual working shell) - Fixed prompt not being saved to history when aborting a query with Escape - Fixed Read tool image handling to identify format from bytes instead of file extension ## 2.0.64 - Made auto-compacting instant - Agents and bash commands can run asynchronously and send messages to wake up the main agent - /stats now provides users with interesting CC stats, such as favorite model, usage graph, usage streak - Added named session support: use `/rename` to name sessions, `/resume <name>` in REPL or `claude --resume <name>` from the terminal to resume them - Added support for `.claude/rules/`. See https://code.claude.com/docs/en/memory for details. - Added image dimension metadata when images are resized, enabling accurate coordinate mappings for large images - Fixed auto-loading .env when using native installer - Fixed `--system-prompt` being ignored when using `--continue` or `--resume` flags - Improved `/resume` screen with grouped forked sessions and keyboard shortcuts for preview (P) and rename (R) - VSCode: Added copy-to-clipboard button on code blocks and bash tool inputs - VSCode: Fixed extension not working on Windows ARM64 by falling back to x64 binary via emulation - Bedrock: Improve efficiency of token counting - Bedrock: Add support for `aws login` AWS Management Console credentials - Unshipped AgentOutputTool and BashOutputTool, in favor of a new unified TaskOutputTool ## 2.0.62 - Added "(Recommended)" indicator for multiple-choice questions, with the recommended option moved to the top of the list - Added `attribution` setting to customize commit and PR bylines (deprecates `includeCoAuthoredBy`) - Fixed duplicate slash commands appearing when ~/.claude is symlinked to a project directory - Fixed slash
command selection not working when multiple commands share the same name - Fixed an issue where skill files inside symlinked skill directories could become circular symlinks - Fixed running versions getting removed because the lock file incorrectly went stale - Fixed IDE diff tab not closing when rejecting file changes ## 2.0.61 - Reverted VSCode support for multiple terminal clients due to responsiveness issues. ## 2.0.60 - Added background agent support. Agents run in the background while you work - Added --disable-slash-commands CLI flag to disable all slash commands - Added model name to "Co-Authored-By" commit messages - Enabled "/mcp enable [server-name]" or "/mcp disable [server-name]" to quickly toggle all servers - Updated Fetch to skip summarization for pre-approved websites - VSCode: Added support for multiple terminal clients connecting to the IDE server simultaneously ## 2.0.59 - Added --agent CLI flag to override the agent setting for the current session - Added `agent` setting to configure main thread with a specific agent's system prompt, tool restrictions, and model - VS Code: Fixed .claude.json config file being read from incorrect location ## 2.0.58 - Pro users now have access to Opus 4.5 as part of their subscription! - Fixed timer duration showing "11m 60s" instead of "12m 0s" - Windows: Managed settings now prefer `C:\Program Files\ClaudeCode` if it exists. Support for `C:\ProgramData\ClaudeCode` will be removed in a future version. ## 2.0.57 - Added feedback input when rejecting plans, allowing users to tell Claude what to change - VSCode: Added streaming message support for real-time response display ## 2.0.56 - Added setting to enable/disable terminal progress bar (OSC 9;4) - VSCode Extension: Added support for VS Code's secondary sidebar (VS Code 1.97+), allowing Claude Code to be displayed in the right sidebar while keeping the file explorer on the left. Requires setting sidebar as Preferred Location in the config.
## 2.0.55 - Fixed proxy DNS resolution being forced on by default. Now opt-in via `CLAUDE_CODE_PROXY_RESOLVES_HOSTS=true` environment variable - Fixed keyboard navigation becoming unresponsive when holding down arrow keys in memory location selector - Improved AskUserQuestion tool to auto-submit single-select questions on the last question, eliminating the extra review screen for simple question flows - Improved fuzzy matching for `@` file suggestions with faster, more accurate results ## 2.0.54 - Hooks: Enable PermissionRequest hooks to process 'always allow' suggestions and apply permission updates - Fix issue with excessive iTerm notifications ## 2.0.52 - Fixed duplicate message display when starting Claude with a command line argument - Fixed `/usage` command progress bars to fill up as usage increases (instead of showing remaining percentage) - Fixed image pasting not working on Linux systems running Wayland (now falls back to wl-paste when xclip is unavailable) - Permit some uses of `$!` in bash commands ## 2.0.51 - Added Opus 4.5! https://www.anthropic.com/news/claude-opus-4-5 - Introducing Claude Code for Desktop: https://claude.com/download - To give you room to try out our new model, we've updated usage limits for Claude Code users. 
See the Claude Opus 4.5 blog for full details - Pro users can now purchase extra usage for access to Opus 4.5 in Claude Code - Plan Mode now builds more precise plans and executes more thoroughly - Usage limit notifications now easier to understand - Switched `/usage` back to "% used" - Fixed handling of thinking errors - Fixed performance regression ## 2.0.50 - Fixed bug preventing calling MCP tools that have nested references in their input schemas - Silenced a noisy but harmless error during upgrades - Improved ultrathink text display - Improved clarity of 5-hour session limit warning message ## 2.0.49 - Added readline-style ctrl-y for pasting deleted text - Improved clarity of usage limit warning message - Fixed handling of subagent permissions ## 2.0.47 - Improved error messages and validation for `claude --teleport` - Improved error handling in `/usage` - Fixed race condition with history entry not getting logged at exit - Fixed Vertex AI configuration not being applied from `settings.json` ## 2.0.46 - Fixed image files being reported with incorrect media type when format cannot be detected from metadata ## 2.0.45 - Added support for Microsoft Foundry! 
See https://code.claude.com/docs/en/azure-ai-foundry - Added `PermissionRequest` hook to automatically approve or deny tool permission requests with custom logic - Send background tasks to Claude Code on the web by starting a message with `&` ## 2.0.43 - Added `permissionMode` field for custom agents - Added `tool_use_id` field to `PreToolUseHookInput` and `PostToolUseHookInput` types - Added skills frontmatter field to declare skills to auto-load for subagents - Added the `SubagentStart` hook event - Fixed nested `CLAUDE.md` files not loading when @-mentioning files - Fixed duplicate rendering of some messages in the UI - Fixed some visual flickers - Fixed NotebookEdit tool inserting cells at incorrect positions when cell IDs matched the pattern `cell-N` ## 2.0.42 - Added `agent_id` and `agent_transcript_path` fields to `SubagentStop` hooks. ## 2.0.41 - Added `model` parameter to prompt-based stop hooks, allowing users to specify a custom model for hook evaluation - Fixed slash commands from user settings being loaded twice, which could cause rendering issues - Fixed incorrect labeling of user settings vs project settings in command descriptions - Fixed crash when plugin command hooks timeout during execution - Fixed: Bedrock users no longer see duplicate Opus entries in the /model picker when using `--model haiku` - Fixed broken security documentation links in trust dialogs and onboarding - Fixed issue where pressing ESC to close the diff modal would also interrupt the model - ctrl-r history search landing on a slash command no longer cancels the search - SDK: Support custom timeouts for hooks - Allow more safe git commands to run without approval - Plugins: Added support for sharing and installing output styles - Teleporting a session from web will automatically set the upstream branch ## 2.0.37 - Fixed how idleness is computed for notifications - Hooks: Added matcher values for Notification hook events - Output Styles: Added `keep-coding-instructions` option to 
frontmatter ## 2.0.36 - Fixed: DISABLE_AUTOUPDATER environment variable now properly disables package manager update notifications - Fixed queued messages being incorrectly executed as bash commands - Fixed input being lost when typing while a queued message is processed ## 2.0.35 - Improve fuzzy search results when searching commands - Improved VS Code extension to respect `chat.fontSize` and `chat.fontFamily` settings throughout the entire UI, and apply font changes immediately without requiring reload - Added `CLAUDE_CODE_EXIT_AFTER_STOP_DELAY` environment variable to automatically exit SDK mode after a specified idle duration, useful for automated workflows and scripts - Migrated `ignorePatterns` from project config to deny permissions in the localSettings. - Fixed menu navigation getting stuck on items with empty string or other falsy values (e.g., in the `/hooks` menu) ## 2.0.34 - VSCode Extension: Added setting to configure the initial permission mode for new conversations - Improved file path suggestion performance with native Rust-based fuzzy finder - Fixed infinite token refresh loop that caused MCP servers with OAuth (e.g., Slack) to hang during connection - Fixed memory crash when reading or writing large files (especially base64-encoded images) ## 2.0.33 - Native binary installs now launch quicker. 
- Fixed `claude doctor` incorrectly detecting Homebrew vs npm-global installations by properly resolving symlinks - Fixed `claude mcp serve` exposing tools with incompatible outputSchemas ## 2.0.32 - Un-deprecate output styles based on community feedback - Added `companyAnnouncements` setting for displaying announcements on startup - Fixed hook progress messages not updating correctly during PostToolUse hook execution ## 2.0.31 - Windows: native installation uses shift+tab as shortcut for mode switching, instead of alt+m - Vertex: add support for Web Search on supported models - VSCode: Added the respectGitIgnore configuration to include .gitignored files in file searches (defaults to true) - Fixed a bug with subagents and MCP servers related to "Tool names must be unique" error - Fixed issue causing `/compact` to fail with `prompt_too_long` by making it respect existing compact boundaries - Fixed plugin uninstall not removing plugins ## 2.0.30 - Added helpful hint to run `security unlock-keychain` when encountering API key errors on macOS with locked keychain - Added `allowUnsandboxedCommands` sandbox setting to disable the dangerouslyDisableSandbox escape hatch at policy level - Added `disallowedTools` field to custom agent definitions for explicit tool blocking - Added prompt-based stop hooks - VSCode: Added respectGitIgnore configuration to include .gitignored files in file searches (defaults to true) - Enabled SSE MCP servers on native build - Deprecated output styles.
Review options in `/output-style` and use --system-prompt-file, --system-prompt, --append-system-prompt, CLAUDE.md, or plugins instead - Removed support for custom ripgrep configuration, resolving an issue where Search returns no results and config discovery fails - Fixed Explore agent creating unwanted .md investigation files during codebase exploration - Fixed a bug where `/context` would sometimes fail with "max_tokens must be greater than thinking.budget_tokens" error message - Fixed `--mcp-config` flag to correctly override file-based MCP configurations - Fixed bug that saved session permissions to local settings - Fixed MCP tools not being available to sub-agents - Fixed hooks and plugins not executing when using --dangerously-skip-permissions flag - Fixed delay when navigating through typeahead suggestions with arrow keys - VSCode: Restored selection indicator in input footer showing current file or code selection status ## 2.0.28 - Plan mode: introduced new Plan subagent - Subagents: claude can now choose to resume subagents - Subagents: claude can dynamically choose the model used by its subagents - SDK: added --max-budget-usd flag - Discovery of custom slash commands, subagents, and output styles no longer respects .gitignore - Stop `/terminal-setup` from adding backslash to `Shift + Enter` in VS Code - Add branch and tag support for git-based plugins and marketplaces using fragment syntax (e.g., `owner/repo#branch`) - Fixed a bug where macOS permission prompts would show up upon initial launch when launching from home directory - Various other bug fixes ## 2.0.27 - New UI for permission prompts - Added current branch filtering and search to session resume screen for easier navigation - Fixed directory @-mention causing "No assistant message found" error - VSCode Extension: Add config setting to include .gitignored files in file searches - VSCode Extension: Bug fixes for unrelated 'Warmup' conversations, and configuration/settings occasionally being reset 
to defaults ## 2.0.25 - Removed legacy SDK entrypoint. Please migrate to @anthropic-ai/claude-agent-sdk for future SDK updates: https://platform.claude.com/docs/en/agent-sdk/migration-guide ## 2.0.24 - Fixed a bug where project-level skills were not loading when --setting-sources 'project' was specified - Claude Code Web: Support for Web -> CLI teleport - Sandbox: Releasing a sandbox mode for the BashTool on Linux & Mac - Bedrock: Display awsAuthRefresh output when auth is required ## 2.0.22 - Fixed content layout shift when scrolling through slash commands - IDE: Add toggle to enable/disable thinking. - Fix bug causing duplicate permission prompts with parallel tool calls - Add support for enterprise managed MCP allowlist and denylist ## 2.0.21 - Support MCP `structuredContent` field in tool responses - Added an interactive question tool - Claude will now ask you questions more often in plan mode - Added Haiku 4.5 as a model option for Pro users - Fixed an issue where queued commands don't have access to previous messages' output ## 2.0.20 - Added support for Claude Skills ## 2.0.19 - Auto-background long-running bash commands instead of killing them. Customize with BASH_DEFAULT_TIMEOUT_MS - Fixed a bug where Haiku was unnecessarily called in print mode ## 2.0.17 - Added Haiku 4.5 to model selector! - Haiku 4.5 automatically uses Sonnet in plan mode, and Haiku for execution (i.e. SonnetPlan by default) - 3P (Bedrock and Vertex) are not automatically upgraded yet. Manual upgrading can be done through setting `ANTHROPIC_DEFAULT_HAIKU_MODEL` - Introducing the Explore subagent. Powered by Haiku it'll search through your codebase efficiently to save context! 
- OTEL: support HTTP_PROXY and HTTPS_PROXY - `CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC` now disables release notes fetching ## 2.0.15 - Fixed bug with resuming where previously created files needed to be read again before writing - Fixed bug with `-p` mode where @-mentioned files needed to be read again before writing ## 2.0.14 - Fix @-mentioning MCP servers to toggle them on/off - Improve permission checks for bash with inline env vars - Fix ultrathink + thinking toggle - Reduce unnecessary logins - Document --system-prompt - Several improvements to rendering - Plugins UI polish ## 2.0.13 - Fixed `/plugin` not working on native build ## 2.0.12 - **Plugin System Released**: Extend Claude Code with custom commands, agents, hooks, and MCP servers from marketplaces - `/plugin install`, `/plugin enable/disable`, `/plugin marketplace` commands for plugin management - Repository-level plugin configuration via `extraKnownMarketplaces` for team collaboration - `/plugin validate` command for validating plugin structure and configuration - Plugin announcement blog post at https://www.anthropic.com/news/claude-code-plugins - Plugin documentation available at https://code.claude.com/docs/en/plugins - Comprehensive error messages and diagnostics via `/doctor` command - Avoid flickering in `/model` selector - Improvements to `/help` - Avoid mentioning hooks in `/resume` summaries - Changes to the "verbose" setting in `/config` now persist across sessions ## 2.0.11 - Reduced system prompt size by 1.4k tokens - IDE: Fixed keyboard shortcuts and focus issues for smoother interaction - Fixed Opus fallback rate limit errors appearing incorrectly - Fixed /add-dir command selecting wrong default tab ## 2.0.10 - Rewrote terminal renderer for buttery smooth UI - Enable/disable MCP servers by @mentioning, or in /mcp - Added tab completion for shell commands in bash mode - PreToolUse hooks can now modify tool inputs - Press Ctrl-G to edit your prompt in your system's configured text editor 
- Fixes for bash permission checks with environment variables in the command ## 2.0.9 - Fix regression where bash backgrounding stopped working ## 2.0.8 - Update Bedrock default Sonnet model to `global.anthropic.claude-sonnet-4-5-20250929-v1:0` - IDE: Add drag-and-drop support for files and folders in chat - /context: Fix counting for thinking blocks - Improve message rendering for users with light themes on dark terminals - Remove deprecated .claude.json allowedTools, ignorePatterns, env, and todoFeatureEnabled config options (instead, configure these in your settings.json) ## 2.0.5 - IDE: Fix IME unintended message submission with Enter and Tab - IDE: Add "Open in Terminal" link in login screen - Fix unhandled OAuth expiration 401 API errors - SDK: Added SDKUserMessageReplay.isReplay to prevent duplicate messages ## 2.0.1 - Skip Sonnet 4.5 default model setting change for Bedrock and Vertex - Various bug fixes and presentation improvements ## 2.0.0 - New native VS Code extension - Fresh coat of paint throughout the whole app - /rewind a conversation to undo code changes - /usage command to see plan limits - Tab to toggle thinking (sticky across sessions) - Ctrl-R to search history - Unshipped claude config command - Hooks: Reduced PostToolUse 'tool_use' ids were found without 'tool_result' blocks errors - SDK: The Claude Code SDK is now the Claude Agent SDK - Add subagents dynamically with `--agents` flag ## 1.0.126 - Enable /context command for Bedrock and Vertex - Add mTLS support for HTTP-based OpenTelemetry exporters ## 1.0.124 - Set `CLAUDE_BASH_NO_LOGIN` environment variable to 1 or true to skip login shell for BashTool - Fix Bedrock and Vertex environment variables evaluating all strings as truthy - No longer inform Claude of the list of allowed tools when permission is denied - Fixed security vulnerability in Bash tool permission checks - Improved VSCode extension performance for large files ## 1.0.123 - Bash permission rules now support output
redirections when matching (e.g., `Bash(python:*)` matches `python script.py > output.txt`) - Fixed thinking mode triggering on negation phrases like "don't think" - Fixed rendering performance degradation during token streaming - Added SlashCommand tool, which enables Claude to invoke your slash commands. https://code.claude.com/docs/en/slash-commands#SlashCommand-tool - Enhanced BashTool environment snapshot logging - Fixed a bug where resuming a conversation in headless mode would sometimes enable thinking unnecessarily - Migrated --debug logging to a file, to enable easy tailing & filtering ## 1.0.120 - Fix input lag during typing, especially noticeable with large prompts - Improved VSCode extension command registry and sessions dialog user experience - Enhanced sessions dialog responsiveness and visual feedback - Fixed IDE compatibility issue by removing worktree support check - Fixed security vulnerability where Bash tool permission checks could be bypassed using prefix matching ## 1.0.119 - Fix Windows issue where process visually freezes on entering interactive mode - Support dynamic headers for MCP servers via headersHelper configuration - Fix thinking mode not working in headless sessions - Fix slash commands now properly update allowed tools instead of replacing them ## 1.0.117 - Add Ctrl-R history search to recall previous commands like bash/zsh - Fix input lag while typing, especially on Windows - Add sed command to auto-allowed commands in acceptEdits mode - Fix Windows PATH comparison to be case-insensitive for drive letters - Add permissions management hint to /add-dir output ## 1.0.115 - Improve thinking mode display with enhanced visual effects - Type /t to temporarily disable thinking mode in your prompt - Improve path validation for glob and grep tools - Show condensed output for post-tool hooks to reduce visual clutter - Fix visual feedback when loading state completes - Improve UI consistency for permission request dialogs ## 1.0.113 - 
Deprecated piped input in interactive mode - Move Ctrl+R keybinding for toggling transcript to Ctrl+O ## 1.0.112 - Transcript mode (Ctrl+R): Added the model used to generate each assistant message - Addressed issue where some Claude Max users were incorrectly recognized as Claude Pro users - Hooks: Added systemMessage support for SessionEnd hooks - Added `spinnerTipsEnabled` setting to disable spinner tips - IDE: Various improvements and bug fixes ## 1.0.111 - /model now validates provided model names - Fixed Bash tool crashes caused by malformed shell syntax parsing ## 1.0.110 - /terminal-setup command now supports WezTerm - MCP: OAuth tokens now proactively refresh before expiration - Fixed reliability issues with background Bash processes ## 1.0.109 - SDK: Added partial message streaming support via `--include-partial-messages` CLI flag ## 1.0.106 - Windows: Fixed path permission matching to consistently use POSIX format (e.g., `Read(//c/Users/...)`) ## 1.0.97 - Settings: /doctor now validates permission rule syntax and suggests corrections ## 1.0.94 - Vertex: add support for global endpoints for supported models - /memory command now allows direct editing of all imported memory files - SDK: Add custom tools as callbacks - Added /todos command to list current todo items ## 1.0.93 - Windows: Add alt + v shortcut for pasting images from clipboard - Support NO_PROXY environment variable to bypass proxy for specified hostnames and IPs ## 1.0.90 - Settings file changes take effect immediately - no restart required ## 1.0.88 - Fixed issue causing "OAuth authentication is currently not supported" - Status line input now includes `exceeds_200k_tokens` - Fixed incorrect usage tracking in /cost. - Introduced `ANTHROPIC_DEFAULT_SONNET_MODEL` and `ANTHROPIC_DEFAULT_OPUS_MODEL` for controlling model aliases opusplan, opus, and sonnet. 
- Bedrock: Updated default Sonnet model to Sonnet 4 ## 1.0.86 - Added /context to help users self-serve debug context issues - SDK: Added UUID support for all SDK messages - SDK: Added `--replay-user-messages` to replay user messages back to stdout ## 1.0.85 - Status line input now includes session cost info - Hooks: Introduced SessionEnd hook ## 1.0.84 - Fix tool_use/tool_result id mismatch error when network is unstable - Fix Claude sometimes ignoring real-time steering when wrapping up a task - @-mention: Add ~/.claude/\* files to suggestions for easier agent, output style, and slash command editing - Use built-in ripgrep by default; to opt out of this behavior, set USE_BUILTIN_RIPGREP=0 ## 1.0.83 - @-mention: Support files with spaces in path - New shimmering spinner ## 1.0.82 - SDK: Add request cancellation support - SDK: New additionalDirectories option to search custom paths, improved slash command processing - Settings: Validation prevents invalid fields in .claude/settings.json files - MCP: Improve tool name consistency - Bash: Fix crash when Claude tries to automatically read large files ## 1.0.81 - Released output styles, including new built-in educational output styles "Explanatory" and "Learning". 
Docs: https://code.claude.com/docs/en/output-styles - Agents: Fix custom agent loading when agent files are unparsable ## 1.0.80 - UI improvements: Fix text contrast for custom subagent colors and spinner rendering issues ## 1.0.77 - Bash tool: Fix heredoc and multiline string escaping, improve stderr redirection handling - SDK: Add session support and permission denial tracking - Fix token limit errors in conversation summarization - Opus Plan Mode: New setting in `/model` to run Opus only in plan mode, Sonnet otherwise ## 1.0.73 - MCP: Support multiple config files with `--mcp-config file1.json file2.json` - MCP: Press Esc to cancel OAuth authentication flows - Bash: Improved command validation and reduced false security warnings - UI: Enhanced spinner animations and status line visual hierarchy - Linux: Added support for Alpine and musl-based distributions (requires separate ripgrep installation) ## 1.0.72 - Ask permissions: have Claude Code always ask for confirmation to use specific tools with /permissions ## 1.0.71 - Background commands: (Ctrl-b) to run any Bash command in the background so Claude can keep working (great for dev servers, tailing logs, etc.) - Customizable status line: add your terminal prompt to Claude Code with /statusline ## 1.0.70 - Performance: Optimized message rendering for better performance with large contexts - Windows: Fixed native file search, ripgrep, and subagent functionality - Added support for @-mentions in slash command arguments ## 1.0.69 - Upgraded Opus to version 4.1 ## 1.0.68 - Fix incorrect model names being used for certain commands like `/pr-comments` - Windows: improve permissions checks for allow / deny tools and project trust. This may create a new project entry in `.claude.json` - manually merge the history field if desired. 
- Windows: improve sub-process spawning to eliminate "No such file or directory" when running commands like pnpm - Enhanced /doctor command with CLAUDE.md and MCP tool context for self-serve debugging - SDK: Added canUseTool callback support for tool confirmation - Added `disableAllHooks` setting - Improved file suggestions performance in large repos ## 1.0.65 - IDE: Fixed connection stability issues and error handling for diagnostics - Windows: Fixed shell environment setup for users without .bashrc files ## 1.0.64 - Agents: Added model customization support - you can now specify which model an agent should use - Agents: Fixed unintended access to the recursive agent tool - Hooks: Added systemMessage field to hook JSON output for displaying warnings and context - SDK: Fixed user input tracking across multi-turn conversations - Added hidden files to file search and @-mention suggestions ## 1.0.63 - Windows: Fixed file search, @agent mentions, and custom slash commands functionality ## 1.0.62 - Added @-mention support with typeahead for custom agents. @ to invoke it - Hooks: Added SessionStart hook for new session initialization - /add-dir command now supports typeahead for directory paths - Improved network connectivity check reliability ## 1.0.61 - Transcript mode (Ctrl+R): Changed Esc to exit transcript mode rather than interrupt - Settings: Added `--settings` flag to load settings from a JSON file - Settings: Fixed resolution of settings files paths that are symlinks - OTEL: Fixed reporting of wrong organization after authentication changes - Slash commands: Fixed permissions checking for allowed-tools with Bash - IDE: Added support for pasting images in VSCode MacOS using ⌘+V - IDE: Added `CLAUDE_CODE_AUTO_CONNECT_IDE=false` for disabling IDE auto-connection - Added `CLAUDE_CODE_SHELL_PREFIX` for wrapping Claude and user-provided shell commands run by Claude Code ## 1.0.60 - You can now create custom subagents for specialized tasks! 
Run /agents to get started ## 1.0.59 - SDK: Added tool confirmation support with canUseTool callback - SDK: Allow specifying env for spawned process - Hooks: Exposed PermissionDecision to hooks (including "ask") - Hooks: UserPromptSubmit now supports additionalContext in advanced JSON output - Fixed issue where some Max users that specified Opus would still see fallback to Sonnet ## 1.0.58 - Added support for reading PDFs - MCP: Improved server health status display in 'claude mcp list' - Hooks: Added CLAUDE_PROJECT_DIR env var for hook commands ## 1.0.57 - Added support for specifying a model in slash commands - Improved permission messages to help Claude understand allowed tools - Fix: Remove trailing newlines from bash output in terminal wrapping ## 1.0.56 - Windows: Enabled shift+tab for mode switching on versions of Node.js that support terminal VT mode - Fixes for WSL IDE detection - Fix an issue causing awsRefreshHelper changes to .aws directory not to be picked up ## 1.0.55 - Clarified knowledge cutoff for Opus 4 and Sonnet 4 models - Windows: fixed Ctrl+Z crash - SDK: Added ability to capture error logging - Add --system-prompt-file option to override system prompt in print mode ## 1.0.54 - Hooks: Added UserPromptSubmit hook and the current working directory to hook inputs - Custom slash commands: Added argument-hint to frontmatter - Windows: OAuth uses port 45454 and properly constructs browser URL - Windows: mode switching now uses alt + m, and plan mode renders properly - Shell: Switch to in-memory shell snapshot to fix file-related errors ## 1.0.53 - Updated @-mention file truncation from 100 lines to 2000 lines - Add helper script settings for AWS token refresh: awsAuthRefresh (for foreground operations like aws sso login) and awsCredentialExport (for background operation with STS-like response). 
## 1.0.52 - Added support for MCP server instructions ## 1.0.51 - Added support for native Windows (requires Git for Windows) - Added support for Bedrock API keys through environment variable AWS_BEARER_TOKEN_BEDROCK - Settings: /doctor can now help you identify and fix invalid setting files - `--append-system-prompt` can now be used in interactive mode, not just --print/-p. - Increased auto-compact warning threshold from 60% to 80% - Fixed an issue with handling user directories with spaces for shell snapshots - OTEL resource now includes os.type, os.version, host.arch, and wsl.version (if running on Windows Subsystem for Linux) - Custom slash commands: Fixed user-level commands in subdirectories - Plan mode: Fixed issue where rejected plan from sub-task would get discarded ## 1.0.48 - Fixed a bug in v1.0.45 where the app would sometimes freeze on launch - Added progress messages to Bash tool based on the last 5 lines of command output - Added expanding variables support for MCP server configuration - Moved shell snapshots from /tmp to ~/.claude for more reliable Bash tool calls - Improved IDE extension path handling when Claude Code runs in WSL - Hooks: Added a PreCompact hook - Vim mode: Added c, f/F, t/T ## 1.0.45 - Redesigned Search (Grep) tool with new tool input parameters and features - Disabled IDE diffs for notebook files, fixing "Timeout waiting after 1000ms" error - Fixed config file corruption issue by enforcing atomic writes - Updated prompt input undo to Ctrl+\_ to avoid breaking existing Ctrl+U behavior, matching zsh's undo shortcut - Stop Hooks: Fixed transcript path after /clear and fixed triggering when loop ends with tool call - Custom slash commands: Restored namespacing in command names based on subdirectories. For example, .claude/commands/frontend/component.md is now /frontend:component, not /component. 
## 1.0.44 - New /export command lets you quickly export a conversation for sharing - MCP: resource_link tool results are now supported - MCP: tool annotations and tool titles now display in /mcp view - Changed Ctrl+Z to suspend Claude Code. Resume by running `fg`. Prompt input undo is now Ctrl+U. ## 1.0.43 - Fixed a bug where the theme selector was saving excessively - Hooks: Added EPIPE system error handling ## 1.0.42 - Added tilde (`~`) expansion support to `/add-dir` command ## 1.0.41 - Hooks: Split Stop hook triggering into Stop and SubagentStop - Hooks: Enabled optional timeout configuration for each command - Hooks: Added "hook_event_name" to hook input - Fixed a bug where MCP tools would display twice in tool list - New tool parameters JSON for Bash tool in `tool_decision` event ## 1.0.40 - Fixed a bug causing API connection errors with UNABLE_TO_GET_ISSUER_CERT_LOCALLY if `NODE_EXTRA_CA_CERTS` was set ## 1.0.39 - New Active Time metric in OpenTelemetry logging ## 1.0.38 - Released hooks. Special thanks to community input in https://github.com/anthropics/claude-code/issues/712. 
Docs: https://code.claude.com/docs/en/hooks ## 1.0.37 - Remove ability to set `Proxy-Authorization` header via ANTHROPIC_AUTH_TOKEN or apiKeyHelper ## 1.0.36 - Web search now takes today's date into context - Fixed a bug where stdio MCP servers were not terminating properly on exit ## 1.0.35 - Added support for MCP OAuth Authorization Server discovery ## 1.0.34 - Fixed a memory leak causing a MaxListenersExceededWarning message to appear ## 1.0.33 - Improved logging functionality with session ID support - Added prompt input undo functionality (Ctrl+Z and vim 'u' command) - Improvements to plan mode ## 1.0.32 - Updated loopback config for litellm - Added forceLoginMethod setting to bypass login selection screen ## 1.0.31 - Fixed a bug where ~/.claude.json would get reset when file contained invalid JSON ## 1.0.30 - Custom slash commands: Run bash output, @-mention files, enable thinking with thinking keywords - Improved file path autocomplete with filename matching - Added timestamps in Ctrl-r mode and fixed Ctrl-c handling - Enhanced jq regex support for complex filters with pipes and select ## 1.0.29 - Improved CJK character support in cursor navigation and rendering ## 1.0.28 - Slash commands: Fix selector display during history navigation - Resizes images before upload to prevent API size limit errors - Added XDG_CONFIG_HOME support to configuration directory - Performance optimizations for memory usage - New attributes (terminal.type, language) in OpenTelemetry logging ## 1.0.27 - Streamable HTTP MCP servers are now supported - Remote MCP servers (SSE and HTTP) now support OAuth - MCP resources can now be @-mentioned - /resume slash command to switch conversations within Claude Code ## 1.0.25 - Slash commands: moved "project" and "user" prefixes to descriptions - Slash commands: improved reliability for command discovery - Improved support for Ghostty - Improved web search reliability ## 1.0.24 - Improved /mcp output - Fixed a bug where settings arrays got 
overwritten instead of merged ## 1.0.23 - Released TypeScript SDK: import @anthropic-ai/claude-code to get started - Released Python SDK: pip install claude-code-sdk to get started ## 1.0.22 - SDK: Renamed `total_cost` to `total_cost_usd` ## 1.0.21 - Improved editing of files with tab-based indentation - Fix for tool_use without matching tool_result errors - Fixed a bug where stdio MCP server processes would linger after quitting Claude Code ## 1.0.18 - Added --add-dir CLI argument for specifying additional working directories - Added streaming input support without requiring -p flag - Improved startup performance and session storage performance - Added CLAUDE_BASH_MAINTAIN_PROJECT_WORKING_DIR environment variable to freeze working directory for bash commands - Added detailed MCP server tools display (/mcp) - MCP authentication and permission improvements - Added auto-reconnection for MCP SSE connections on disconnect - Fixed issue where pasted content was lost when dialogs appeared ## 1.0.17 - We now emit messages from sub-tasks in -p mode (look for the parent_tool_use_id property) - Fixed crashes when the VS Code diff tool is invoked multiple times quickly - MCP server list UI improvements - Update Claude Code process title to display "claude" instead of "node" ## 1.0.11 - Claude Code can now also be used with a Claude Pro subscription - Added /upgrade for smoother switching to Claude Max plans - Improved UI for authentication from API keys and Bedrock/Vertex/external auth tokens - Improved shell configuration error handling - Improved todo list handling during compaction ## 1.0.10 - Added markdown table support - Improved streaming performance ## 1.0.8 - Fixed Vertex AI region fallback when using CLOUD_ML_REGION - Increased default otel interval from 1s -> 5s - Fixed edge cases where MCP_TIMEOUT and MCP_TOOL_TIMEOUT weren't being respected - Fixed a regression where search tools unnecessarily asked for permissions - Added support for triggering thinking in 
non-English languages - Improved compacting UI ## 1.0.7 - Renamed /allowed-tools -> /permissions - Migrated allowedTools and ignorePatterns from .claude.json -> settings.json - Deprecated claude config commands in favor of editing settings.json - Fixed a bug where --dangerously-skip-permissions sometimes didn't work in --print mode - Improved error handling for /install-github-app - Bugfixes, UI polish, and tool reliability improvements ## 1.0.6 - Improved edit reliability for tab-indented files - Respect CLAUDE_CONFIG_DIR everywhere - Reduced unnecessary tool permission prompts - Added support for symlinks in @file typeahead - Bugfixes, UI polish, and tool reliability improvements ## 1.0.4 - Fixed a bug where MCP tool errors weren't being parsed correctly ## 1.0.1 - Added `DISABLE_INTERLEAVED_THINKING` to give users the option to opt out of interleaved thinking. - Improved model references to show provider-specific names (Sonnet 3.7 for Bedrock, Sonnet 4 for Console) - Updated documentation links and OAuth process descriptions ## 1.0.0 - Claude Code is now generally available - Introducing Sonnet 4 and Opus 4 models ## 0.2.125 - Breaking change: Bedrock ARN passed to `ANTHROPIC_MODEL` or `ANTHROPIC_SMALL_FAST_MODEL` should no longer contain an escaped slash (specify `/` instead of `%2F`) - Removed `DEBUG=true` in favor of `ANTHROPIC_LOG=debug`, to log all requests ## 0.2.117 - Breaking change: --print JSON output now returns nested message objects, for forwards-compatibility as we introduce new metadata fields - Introduced settings.cleanupPeriodDays - Introduced CLAUDE_CODE_API_KEY_HELPER_TTL_MS env var - Introduced --debug mode ## 0.2.108 - You can now send messages to Claude while it works to steer Claude in real-time - Introduced BASH_DEFAULT_TIMEOUT_MS and BASH_MAX_TIMEOUT_MS env vars - Fixed a bug where thinking was not working in -p mode - Fixed a regression in /cost reporting - Deprecated MCP wizard interface in favor of other MCP commands - Lots of other 
bugfixes and improvements ## 0.2.107 - CLAUDE.md files can now import other files. Add @path/to/file.md to ./CLAUDE.md to load additional files on launch ## 0.2.106 - MCP SSE server configs can now specify custom headers - Fixed a bug where MCP permission prompt didn't always show correctly ## 0.2.105 - Claude can now search the web - Moved system & account status to /status - Added word movement keybindings for Vim - Improved latency for startup, todo tool, and file edits ## 0.2.102 - Improved thinking triggering reliability - Improved @mention reliability for images and folders - You can now paste multiple large chunks into one prompt ## 0.2.100 - Fixed a crash caused by a stack overflow error - Made db storage optional; missing db support disables --continue and --resume ## 0.2.98 - Fixed an issue where auto-compact was running twice ## 0.2.96 - Claude Code can now also be used with a Claude Max subscription (https://claude.ai/upgrade) ## 0.2.93 - Resume conversations from where you left off with "claude --continue" and "claude --resume" - Claude now has access to a Todo list that helps it stay on track and be more organized ## 0.2.82 - Added support for --disallowedTools - Renamed tools for consistency: LSTool -> LS, View -> Read, etc. 
## 0.2.75 - Hit Enter to queue up additional messages while Claude is working - Drag in or copy/paste image files directly into the prompt - @-mention files to directly add them to context - Run one-off MCP servers with `claude --mcp-config ` - Improved performance for filename auto-complete ## 0.2.74 - Added support for refreshing dynamically generated API keys (via apiKeyHelper), with a 5 minute TTL - Task tool can now perform writes and run bash commands ## 0.2.72 - Updated spinner to indicate tokens loaded and tool usage ## 0.2.70 - Network commands like curl are now available for Claude to use - Claude can now run multiple web queries in parallel - Pressing ESC once immediately interrupts Claude in Auto-accept mode ## 0.2.69 - Fixed UI glitches with improved Select component behavior - Enhanced terminal output display with better text truncation logic ## 0.2.67 - Shared project permission rules can be saved in .claude/settings.json ## 0.2.66 - Print mode (-p) now supports streaming output via --output-format=stream-json - Fixed issue where pasting could trigger memory or bash mode unexpectedly ## 0.2.63 - Fixed an issue where MCP tools were loaded twice, which caused tool call errors ## 0.2.61 - Navigate menus with vim-style keys (j/k) or bash/emacs shortcuts (Ctrl+n/p) for faster interaction - Enhanced image detection for more reliable clipboard paste functionality - Fixed an issue where ESC key could crash the conversation history selector ## 0.2.59 - Copy+paste images directly into your prompt - Improved progress indicators for bash and fetch tools - Bugfixes for non-interactive mode (-p) ## 0.2.54 - Quickly add to Memory by starting your message with '#' - Press ctrl+r to see full output for long tool results - Added support for MCP SSE transport ## 0.2.53 - New web fetch tool lets Claude view URLs that you paste in - Fixed a bug with JPEG detection ## 0.2.50 - New MCP "project" scope now allows you to add MCP servers to .mcp.json files and commit them to 
your repository ## 0.2.49 - Previous MCP server scopes have been renamed: previous "project" scope is now "local" and "global" scope is now "user" ## 0.2.47 - Press Tab to auto-complete file and folder names - Press Shift + Tab to toggle auto-accept for file edits - Automatic conversation compaction for infinite conversation length (toggle with /config) ## 0.2.44 - Ask Claude to make a plan with thinking mode: just say 'think' or 'think harder' or even 'ultrathink' ## 0.2.41 - MCP server startup timeout can now be configured via MCP_TIMEOUT environment variable - MCP server startup no longer blocks the app from starting up ## 0.2.37 - New /release-notes command lets you view release notes at any time - `claude config add/remove` commands now accept multiple values separated by commas or spaces ## 0.2.36 - Import MCP servers from Claude Desktop with `claude mcp add-from-claude-desktop` - Add MCP servers as JSON strings with `claude mcp add-json ` ## 0.2.34 - Vim bindings for text input - enable with /vim or /config ## 0.2.32 - Interactive MCP setup wizard: Run "claude mcp add" to add MCP servers with a step-by-step interface - Fix for some PersistentShell issues ## 0.2.31 - Custom slash commands: Markdown files in .claude/commands/ directories now appear as custom slash commands to insert prompts into your conversation - MCP debug mode: Run with --mcp-debug flag to get more information about MCP server errors ## 0.2.30 - Added ANSI color theme for better terminal compatibility - Fixed issue where slash command arguments weren't being sent properly - (Mac-only) API keys are now stored in macOS Keychain ## 0.2.26 - New /approved-tools command for managing tool permissions - Word-level diff display for improved code readability - Fuzzy matching for slash commands ## 0.2.21 - Fuzzy matching for /commands ================================================ FILE: LICENSE.md ================================================ © Anthropic PBC. All rights reserved. 
Use is subject to Anthropic's [Commercial Terms of Service](https://www.anthropic.com/legal/commercial-terms). ================================================ FILE: README.md ================================================ # Claude Code ![](https://img.shields.io/badge/Node.js-18%2B-brightgreen?style=flat-square) [![npm]](https://www.npmjs.com/package/@anthropic-ai/claude-code) [npm]: https://img.shields.io/npm/v/@anthropic-ai/claude-code.svg?style=flat-square Claude Code is an agentic coding tool that lives in your terminal, understands your codebase, and helps you code faster by executing routine tasks, explaining complex code, and handling git workflows -- all through natural language commands. Use it in your terminal, IDE, or tag @claude on Github. **Learn more in the [official documentation](https://code.claude.com/docs/en/overview)**. ## Get started > [!NOTE] > Installation via npm is deprecated. Use one of the recommended methods below. For more installation options, uninstall steps, and troubleshooting, see the [setup documentation](https://code.claude.com/docs/en/setup). 1. Install Claude Code: **MacOS/Linux (Recommended):** ```bash curl -fsSL https://claude.ai/install.sh | bash ``` **Homebrew (MacOS/Linux):** ```bash brew install --cask claude-code ``` **Windows (Recommended):** ```powershell irm https://claude.ai/install.ps1 | iex ``` **WinGet (Windows):** ```powershell winget install Anthropic.ClaudeCode ``` **NPM (Deprecated):** ```bash npm install -g @anthropic-ai/claude-code ``` 2. Navigate to your project directory and run `claude`. ## Plugins This repository includes several Claude Code plugins that extend functionality with custom commands and agents. See the [plugins directory](./plugins/README.md) for detailed documentation on available plugins. ## Reporting Bugs We welcome your feedback. Use the `/bug` command to report issues directly within Claude Code, or file a [GitHub issue](https://github.com/anthropics/claude-code/issues). 
## Connect on Discord Join the [Claude Developers Discord](https://anthropic.com/discord) to connect with other developers using Claude Code. Get help, share feedback, and discuss your projects with the community. ## Data collection, usage, and retention When you use Claude Code, we collect feedback, which includes usage data (such as code acceptance or rejections), associated conversation data, and user feedback submitted via the `/bug` command. ### How we use your data See our [data usage policies](https://code.claude.com/docs/en/data-usage). ### Privacy safeguards We have implemented several safeguards to protect your data, including limited retention periods for sensitive information, restricted access to user session data, and clear policies against using feedback for model training. For full details, please review our [Commercial Terms of Service](https://www.anthropic.com/legal/commercial-terms) and [Privacy Policy](https://www.anthropic.com/legal/privacy). ================================================ FILE: SECURITY.md ================================================ # Security Policy Thank you for helping us keep Claude Code secure! ## Reporting Security Issues The security of our systems and user data is Anthropic's top priority. We appreciate the work of security researchers acting in good faith in identifying and reporting potential vulnerabilities. Our security program is managed on HackerOne and we ask that any validated vulnerability in this functionality be reported through their [submission form](https://hackerone.com/anthropic-vdp/reports/new?type=team&report_type=vulnerability). ## Vulnerability Disclosure Program Our Vulnerability Program Guidelines are defined on our [HackerOne program page](https://hackerone.com/anthropic-vdp). 
================================================
FILE: Script/run_devcontainer_claude_code.ps1
================================================
<#
.SYNOPSIS
Automates the setup and connection to a DevContainer environment using either Docker or Podman on Windows.

.DESCRIPTION
This script automates the process of initializing, starting, and connecting to a DevContainer using either Docker or Podman as the container backend. It must be executed from the root directory of your project and assumes the script is located in a 'Script' subdirectory.

.PARAMETER Backend
Specifies the container backend to use. Valid values are 'docker' or 'podman'.

.EXAMPLE
.\Script\run_devcontainer_claude_code.ps1 -Backend docker
Uses Docker as the container backend.

.EXAMPLE
.\Script\run_devcontainer_claude_code.ps1 -Backend podman
Uses Podman as the container backend.

.NOTES
Project Structure:
Project/
├── .devcontainer/
└── Script/
    └── run_devcontainer_claude_code.ps1
#>
[CmdletBinding()]
param(
    [Parameter(Mandatory=$true)]
    [ValidateSet('docker','podman')]
    [string]$Backend
)

# Notify script start
Write-Host "--- DevContainer Startup & Connection Script ---"
Write-Host "Using backend: $($Backend)"

# --- Prerequisite Check ---
# Fail fast (exit 1) if either the chosen container CLI or the devcontainer CLI
# is missing from PATH, since every later step shells out to them.
Write-Host "Checking for required commands..."
try {
    if (-not (Get-Command $Backend -ErrorAction SilentlyContinue)) {
        throw "Required command '$($Backend)' not found."
    }
    Write-Host "- $($Backend) command found."
    if (-not (Get-Command devcontainer -ErrorAction SilentlyContinue)) {
        throw "Required command 'devcontainer' not found."
    }
    Write-Host "- devcontainer command found."
} catch {
    Write-Error "A required command is not installed or not in your PATH. $($_.Exception.Message)"
    Write-Error "Please ensure both '$Backend' and 'devcontainer' are installed and accessible in your system's PATH."
    exit 1
}

# --- Backend-Specific Initialization ---
if ($Backend -eq 'podman') {
    Write-Host "--- Podman Backend Initialization ---"

    # --- Step 1a: Initialize Podman machine ---
    Write-Host "Initializing Podman machine 'claudeVM'..."
    try {
        & podman machine init claudeVM
        Write-Host "Podman machine 'claudeVM' initialized or already exists."
    } catch {
        Write-Error "Failed to initialize Podman machine: $($_.Exception.Message)"
        exit 1 # Exit script on error
    }

    # --- Step 1b: Start Podman machine ---
    Write-Host "Starting Podman machine 'claudeVM'..."
    try {
        & podman machine start claudeVM -q
        Write-Host "Podman machine started or already running."
    } catch {
        Write-Error "Failed to start Podman machine: $($_.Exception.Message)"
        exit 1
    }

    # --- Step 2: Set default connection ---
    # Non-fatal: a failure here usually just means the default is already set.
    Write-Host "Setting default Podman connection to 'claudeVM'..."
    try {
        & podman system connection default claudeVM
        Write-Host "Default connection set."
    } catch {
        Write-Warning "Failed to set default Podman connection (may be already set or machine issue): $($_.Exception.Message)"
    }
} elseif ($Backend -eq 'docker') {
    Write-Host "--- Docker Backend Initialization ---"

    # --- Step 1 & 2: Check Docker Desktop ---
    Write-Host "Checking if Docker Desktop is running and docker command is available..."
    try {
        docker info | Out-Null
        Write-Host "Docker Desktop (daemon) is running."
    } catch {
        Write-Error "Docker Desktop is not running or docker command not found."
        Write-Error "Please ensure Docker Desktop is running."
        exit 1
    }
}

# --- Step 3: Bring up DevContainer ---
# When using podman, tell the devcontainer CLI to invoke 'podman' wherever it
# would normally invoke 'docker'.
Write-Host "Bringing up DevContainer in the current folder..."
try {
    $arguments = @('up', '--workspace-folder', '.')
    if ($Backend -eq 'podman') {
        $arguments += '--docker-path', 'podman'
    }
    & devcontainer @arguments
    Write-Host "DevContainer startup process completed."
} catch {
    Write-Error "Failed to bring up DevContainer: $($_.Exception.Message)"
    exit 1
}

# --- Step 4: Get DevContainer ID ---
# The devcontainer CLI labels the container with the local workspace folder,
# so filter on that label to find the container belonging to this project.
Write-Host "Finding the DevContainer ID..."
$currentFolder = (Get-Location).Path
try {
    $containerId = (& $Backend ps --filter "label=devcontainer.local_folder=$currentFolder" --format '{{.ID}}').Trim()
} catch {
    $displayCommand = "$Backend ps --filter `"label=devcontainer.local_folder=$currentFolder`" --format '{{.ID}}'"
    Write-Error "Failed to get container ID (Command: $displayCommand): $($_.Exception.Message)"
    exit 1
}
if (-not $containerId) {
    Write-Error "Could not find DevContainer ID for the current folder ('$currentFolder')."
    Write-Error "Please check if 'devcontainer up' was successful and the container is running."
    exit 1
}
Write-Host "Found container ID: $containerId"

# --- Step 5 & 6: Execute command and enter interactive shell inside container ---
# Runs 'claude' first, then replaces it with an interactive zsh so the user
# stays inside the container after Claude Code exits.
Write-Host "Executing 'claude' command and then starting zsh session inside container $($containerId)..."
try {
    & $Backend exec -it $containerId zsh -c 'claude; exec zsh'
    Write-Host "Interactive session ended."
} catch {
    $displayCommand = "$Backend exec -it $containerId zsh -c 'claude; exec zsh'"
    Write-Error "Failed to execute command inside container (Command: $displayCommand): $($_.Exception.Message)"
    exit 1
}

# Notify script completion
Write-Host "--- Script completed ---"

================================================
FILE: examples/hooks/bash_command_validator_example.py
================================================
#!/usr/bin/env python3
"""
Claude Code Hook: Bash Command Validator
=========================================
This hook runs as a PreToolUse hook for the Bash tool.
It validates bash commands against a set of rules before execution.
In this case it changes grep calls to using rg.

Read more about hooks here: https://docs.anthropic.com/en/docs/claude-code/hooks

Make sure to change your path to your actual script.
{ "hooks": { "PreToolUse": [ { "matcher": "Bash", "hooks": [ { "type": "command", "command": "python3 /path/to/claude-code/examples/hooks/bash_command_validator_example.py" } ] } ] } } """ import json import re import sys # Define validation rules as a list of (regex pattern, message) tuples _VALIDATION_RULES = [ ( r"^grep\b(?!.*\|)", "Use 'rg' (ripgrep) instead of 'grep' for better performance and features", ), ( r"^find\s+\S+\s+-name\b", "Use 'rg --files | rg pattern' or 'rg --files -g pattern' instead of 'find -name' for better performance", ), ] def _validate_command(command: str) -> list[str]: issues = [] for pattern, message in _VALIDATION_RULES: if re.search(pattern, command): issues.append(message) return issues def main(): try: input_data = json.load(sys.stdin) except json.JSONDecodeError as e: print(f"Error: Invalid JSON input: {e}", file=sys.stderr) # Exit code 1 shows stderr to the user but not to Claude sys.exit(1) tool_name = input_data.get("tool_name", "") if tool_name != "Bash": sys.exit(0) tool_input = input_data.get("tool_input", {}) command = tool_input.get("command", "") if not command: sys.exit(0) issues = _validate_command(command) if issues: for message in issues: print(f"• {message}", file=sys.stderr) # Exit code 2 blocks tool call and shows stderr to Claude sys.exit(2) if __name__ == "__main__": main() ================================================ FILE: examples/settings/README.md ================================================ # Settings Examples Example Claude Code settings files, primarily intended for organization-wide deployments. Use these are starting points — adjust them to fit your needs. These may be applied at any level of the [settings hierarchy](https://code.claude.com/docs/en/settings#settings-files), though certain properties only take effect if specified in enterprise settings (e.g. `strictKnownMarketplaces`, `allowManagedHooksOnly`, `allowManagedPermissionRulesOnly`). 
## Configuration Examples > [!WARNING] > These examples are community-maintained snippets which may be unsupported or incorrect. You are responsible for the correctness of your own settings configuration. | Setting | [`settings-lax.json`](./settings-lax.json) | [`settings-strict.json`](./settings-strict.json) | [`settings-bash-sandbox.json`](./settings-bash-sandbox.json) | |---------|:---:|:---:|:---:| | Disable `--dangerously-skip-permissions` | ✅ | ✅ | | | Block plugin marketplaces | ✅ | ✅ | | | Block user and project-defined permission `allow` / `ask` / `deny` | | ✅ | ✅ | | Block user and project-defined hooks | | ✅ | | | Deny web fetch and search tools | | ✅ | | | Bash tool requires approval | | ✅ | | | Bash tool must run inside of sandbox | | | ✅ | ## Tips - Consider merging snippets of the above examples to reach your desired configuration - Settings files must be valid JSON - Before deploying configuration files to your organization, test them locally by applying to `managed-settings.json`, `settings.json` or `settings.local.json` - The `sandbox` property only applies to the `Bash` tool; it does not apply to other tools (like Read, Write, WebSearch, WebFetch, MCPs), hooks, or internal commands ## Full Documentation See https://code.claude.com/docs/en/settings for complete documentation on all available managed settings. 
================================================ FILE: examples/settings/settings-bash-sandbox.json ================================================ { "allowManagedPermissionRulesOnly": true, "sandbox": { "enabled": true, "autoAllowBashIfSandboxed": false, "allowUnsandboxedCommands": false, "excludedCommands": [], "network": { "allowUnixSockets": [], "allowAllUnixSockets": false, "allowLocalBinding": false, "allowedDomains": [], "httpProxyPort": null, "socksProxyPort": null }, "enableWeakerNestedSandbox": false } } ================================================ FILE: examples/settings/settings-lax.json ================================================ { "permissions": { "disableBypassPermissionsMode": "disable" }, "strictKnownMarketplaces": [] } ================================================ FILE: examples/settings/settings-strict.json ================================================ { "permissions": { "disableBypassPermissionsMode": "disable", "ask": [ "Bash" ], "deny": [ "WebSearch", "WebFetch" ] }, "allowManagedPermissionRulesOnly": true, "allowManagedHooksOnly": true, "strictKnownMarketplaces": [], "sandbox": { "autoAllowBashIfSandboxed": false, "excludedCommands": [], "network": { "allowUnixSockets": [], "allowAllUnixSockets": false, "allowLocalBinding": false, "allowedDomains": [], "httpProxyPort": null, "socksProxyPort": null }, "enableWeakerNestedSandbox": false } } ================================================ FILE: plugins/README.md ================================================ # Claude Code Plugins This directory contains some official Claude Code plugins that extend functionality through custom commands, agents, and workflows. These are examples of what's possible with the Claude Code plugin system—many more plugins are available through community marketplaces. ## What are Claude Code Plugins? Claude Code plugins are extensions that enhance Claude Code with custom slash commands, specialized agents, hooks, and MCP servers. 
Plugins can be shared across projects and teams, providing consistent tooling and workflows. Learn more in the [official plugins documentation](https://docs.claude.com/en/docs/claude-code/plugins). ## Plugins in This Directory | Name | Description | Contents | |------|-------------|----------| | [agent-sdk-dev](./agent-sdk-dev/) | Development kit for working with the Claude Agent SDK | **Command:** `/new-sdk-app` - Interactive setup for new Agent SDK projects
**Agents:** `agent-sdk-verifier-py`, `agent-sdk-verifier-ts` - Validate SDK applications against best practices | | [claude-opus-4-5-migration](./claude-opus-4-5-migration/) | Migrate code and prompts from Sonnet 4.x and Opus 4.1 to Opus 4.5 | **Skill:** `claude-opus-4-5-migration` - Automated migration of model strings, beta headers, and prompt adjustments | | [code-review](./code-review/) | Automated PR code review using multiple specialized agents with confidence-based scoring to filter false positives | **Command:** `/code-review` - Automated PR review workflow
**Agents:** 5 parallel Sonnet agents for CLAUDE.md compliance, bug detection, historical context, PR history, and code comments | | [commit-commands](./commit-commands/) | Git workflow automation for committing, pushing, and creating pull requests | **Commands:** `/commit`, `/commit-push-pr`, `/clean_gone` - Streamlined git operations | | [explanatory-output-style](./explanatory-output-style/) | Adds educational insights about implementation choices and codebase patterns (mimics the deprecated Explanatory output style) | **Hook:** SessionStart - Injects educational context at the start of each session | | [feature-dev](./feature-dev/) | Comprehensive feature development workflow with a structured 7-phase approach | **Command:** `/feature-dev` - Guided feature development workflow
**Agents:** `code-explorer`, `code-architect`, `code-reviewer` - For codebase analysis, architecture design, and quality review | | [frontend-design](./frontend-design/) | Create distinctive, production-grade frontend interfaces that avoid generic AI aesthetics | **Skill:** `frontend-design` - Auto-invoked for frontend work, providing guidance on bold design choices, typography, animations, and visual details | | [hookify](./hookify/) | Easily create custom hooks to prevent unwanted behaviors by analyzing conversation patterns or explicit instructions | **Commands:** `/hookify`, `/hookify:list`, `/hookify:configure`, `/hookify:help`
**Agent:** `conversation-analyzer` - Analyzes conversations for problematic behaviors
**Skill:** `writing-rules` - Guidance on hookify rule syntax | | [learning-output-style](./learning-output-style/) | Interactive learning mode that requests meaningful code contributions at decision points (mimics the unshipped Learning output style) | **Hook:** SessionStart - Encourages users to write meaningful code (5-10 lines) at decision points while receiving educational insights | | [plugin-dev](./plugin-dev/) | Comprehensive toolkit for developing Claude Code plugins with 7 expert skills and AI-assisted creation | **Command:** `/plugin-dev:create-plugin` - 8-phase guided workflow for building plugins
**Agents:** `agent-creator`, `plugin-validator`, `skill-reviewer`
**Skills:** Hook development, MCP integration, plugin structure, settings, commands, agents, and skill development | | [pr-review-toolkit](./pr-review-toolkit/) | Comprehensive PR review agents specializing in comments, tests, error handling, type design, code quality, and code simplification | **Command:** `/pr-review-toolkit:review-pr` - Run with optional review aspects (comments, tests, errors, types, code, simplify, all)
**Agents:** `comment-analyzer`, `pr-test-analyzer`, `silent-failure-hunter`, `type-design-analyzer`, `code-reviewer`, `code-simplifier` | | [ralph-wiggum](./ralph-wiggum/) | Interactive self-referential AI loops for iterative development. Claude works on the same task repeatedly until completion | **Commands:** `/ralph-loop`, `/cancel-ralph` - Start/stop autonomous iteration loops
**Hook:** Stop - Intercepts exit attempts to continue iteration | | [security-guidance](./security-guidance/) | Security reminder hook that warns about potential security issues when editing files | **Hook:** PreToolUse - Monitors 9 security patterns including command injection, XSS, eval usage, dangerous HTML, pickle deserialization, and os.system calls | ## Installation These plugins are included in the Claude Code repository. To use them in your own projects: 1. Install Claude Code globally: ```bash npm install -g @anthropic-ai/claude-code ``` 2. Navigate to your project and run Claude Code: ```bash claude ``` 3. Use the `/plugin` command to install plugins from marketplaces, or configure them in your project's `.claude/settings.json`. For detailed plugin installation and configuration, see the [official documentation](https://docs.claude.com/en/docs/claude-code/plugins). ## Plugin Structure Each plugin follows the standard Claude Code plugin structure: ``` plugin-name/ ├── .claude-plugin/ │ └── plugin.json # Plugin metadata ├── commands/ # Slash commands (optional) ├── agents/ # Specialized agents (optional) ├── skills/ # Agent Skills (optional) ├── hooks/ # Event handlers (optional) ├── .mcp.json # External tool configuration (optional) └── README.md # Plugin documentation ``` ## Contributing When adding new plugins to this directory: 1. Follow the standard plugin structure 2. Include a comprehensive README.md 3. Add plugin metadata in `.claude-plugin/plugin.json` 4. Document all commands and agents 5. 
Provide usage examples ## Learn More - [Claude Code Documentation](https://docs.claude.com/en/docs/claude-code/overview) - [Plugin System Documentation](https://docs.claude.com/en/docs/claude-code/plugins) - [Agent SDK Documentation](https://docs.claude.com/en/api/agent-sdk/overview) ================================================ FILE: plugins/agent-sdk-dev/.claude-plugin/plugin.json ================================================ { "name": "agent-sdk-dev", "description": "Claude Agent SDK Development Plugin", "version": "1.0.0", "author": { "name": "Ashwin Bhat", "email": "ashwin@anthropic.com" } } ================================================ FILE: plugins/agent-sdk-dev/README.md ================================================ # Agent SDK Development Plugin A comprehensive plugin for creating and verifying Claude Agent SDK applications in Python and TypeScript. ## Overview The Agent SDK Development Plugin streamlines the entire lifecycle of building Agent SDK applications, from initial scaffolding to verification against best practices. It helps you quickly start new projects with the latest SDK versions and ensures your applications follow official documentation patterns. ## Features ### Command: `/new-sdk-app` Interactive command that guides you through creating a new Claude Agent SDK application. **What it does:** - Asks clarifying questions about your project (language, name, agent type, starting point) - Checks for and installs the latest SDK version - Creates all necessary project files and configuration - Sets up proper environment files (.env.example, .gitignore) - Provides a working example tailored to your use case - Runs type checking (TypeScript) or syntax validation (Python) - Automatically verifies the setup using the appropriate verifier agent **Usage:** ```bash /new-sdk-app my-project-name ``` Or simply: ```bash /new-sdk-app ``` The command will interactively ask you: 1. Language choice (TypeScript or Python) 2. 
Project name (if not provided) 3. Agent type (coding, business, custom) 4. Starting point (minimal, basic, or specific example) 5. Tooling preferences (npm/yarn/pnpm or pip/poetry) **Example:** ```bash /new-sdk-app customer-support-agent # → Creates a new Agent SDK project for a customer support agent # → Sets up TypeScript or Python environment # → Installs latest SDK version # → Verifies the setup automatically ``` ### Agent: `agent-sdk-verifier-py` Thoroughly verifies Python Agent SDK applications for correct setup and best practices. **Verification checks:** - SDK installation and version - Python environment setup (requirements.txt, pyproject.toml) - Correct SDK usage and patterns - Agent initialization and configuration - Environment and security (.env, API keys) - Error handling and functionality - Documentation completeness **When to use:** - After creating a new Python SDK project - After modifying an existing Python SDK application - Before deploying a Python SDK application **Usage:** The agent runs automatically after `/new-sdk-app` creates a Python project, or you can trigger it by asking: ``` "Verify my Python Agent SDK application" "Check if my SDK app follows best practices" ``` **Output:** Provides a comprehensive report with: - Overall status (PASS / PASS WITH WARNINGS / FAIL) - Critical issues that prevent functionality - Warnings about suboptimal patterns - List of passed checks - Specific recommendations with SDK documentation references ### Agent: `agent-sdk-verifier-ts` Thoroughly verifies TypeScript Agent SDK applications for correct setup and best practices. 
**Verification checks:** - SDK installation and version - TypeScript configuration (tsconfig.json) - Correct SDK usage and patterns - Type safety and imports - Agent initialization and configuration - Environment and security (.env, API keys) - Error handling and functionality - Documentation completeness **When to use:** - After creating a new TypeScript SDK project - After modifying an existing TypeScript SDK application - Before deploying a TypeScript SDK application **Usage:** The agent runs automatically after `/new-sdk-app` creates a TypeScript project, or you can trigger it by asking: ``` "Verify my TypeScript Agent SDK application" "Check if my SDK app follows best practices" ``` **Output:** Provides a comprehensive report with: - Overall status (PASS / PASS WITH WARNINGS / FAIL) - Critical issues that prevent functionality - Warnings about suboptimal patterns - List of passed checks - Specific recommendations with SDK documentation references ## Workflow Example Here's a typical workflow using this plugin: 1. **Create a new project:** ```bash /new-sdk-app code-reviewer-agent ``` 2. **Answer the interactive questions:** ``` Language: TypeScript Agent type: Coding agent (code review) Starting point: Basic agent with common features ``` 3. **Automatic verification:** The command automatically runs `agent-sdk-verifier-ts` to ensure everything is correctly set up. 4. **Start developing:** ```bash # Set your API key echo "ANTHROPIC_API_KEY=your_key_here" > .env # Run your agent npm start ``` 5. **Verify after changes:** ``` "Verify my SDK application" ``` ## Installation This plugin is included in the Claude Code repository. To use it: 1. Ensure Claude Code is installed 2. 
The plugin commands and agents are automatically available ## Best Practices - **Always use the latest SDK version**: `/new-sdk-app` checks for and installs the latest version - **Verify before deploying**: Run the verifier agent before deploying to production - **Keep API keys secure**: Never commit `.env` files or hardcode API keys - **Follow SDK documentation**: The verifier agents check against official patterns - **Type check TypeScript projects**: Run `npx tsc --noEmit` regularly - **Test your agents**: Create test cases for your agent's functionality ## Resources - [Agent SDK Overview](https://docs.claude.com/en/api/agent-sdk/overview) - [TypeScript SDK Reference](https://docs.claude.com/en/api/agent-sdk/typescript) - [Python SDK Reference](https://docs.claude.com/en/api/agent-sdk/python) - [Agent SDK Examples](https://docs.claude.com/en/api/agent-sdk/examples) ## Troubleshooting ### Type errors in TypeScript project **Issue**: TypeScript project has type errors after creation **Solution**: - The `/new-sdk-app` command runs type checking automatically - If errors persist, check that you're using the latest SDK version - Verify your `tsconfig.json` matches SDK requirements ### Python import errors **Issue**: Cannot import from `claude_agent_sdk` **Solution**: - Ensure you've installed dependencies: `pip install -r requirements.txt` - Activate your virtual environment if using one - Check that the SDK is installed: `pip show claude-agent-sdk` ### Verification fails with warnings **Issue**: Verifier agent reports warnings **Solution**: - Review the specific warnings in the report - Check the SDK documentation references provided - Warnings don't prevent functionality but indicate areas for improvement ## Author Ashwin Bhat (ashwin@anthropic.com) ## Version 1.0.0 ================================================ FILE: plugins/agent-sdk-dev/agents/agent-sdk-verifier-py.md ================================================ --- name: agent-sdk-verifier-py description: 
Use this agent to verify that a Python Agent SDK application is properly configured, follows SDK best practices and documentation recommendations, and is ready for deployment or testing. This agent should be invoked after a Python Agent SDK app has been created or modified. model: sonnet --- You are a Python Agent SDK application verifier. Your role is to thoroughly inspect Python Agent SDK applications for correct SDK usage, adherence to official documentation recommendations, and readiness for deployment. ## Verification Focus Your verification should prioritize SDK functionality and best practices over general code style. Focus on: 1. **SDK Installation and Configuration**: - Verify `claude-agent-sdk` is installed (check requirements.txt, pyproject.toml, or pip list) - Check that the SDK version is reasonably current (not ancient) - Validate Python version requirements are met (typically Python 3.8+) - Confirm virtual environment is recommended/documented if applicable 2. **Python Environment Setup**: - Check for requirements.txt or pyproject.toml - Verify dependencies are properly specified - Ensure Python version constraints are documented if needed - Validate that the environment can be reproduced 3. **SDK Usage and Patterns**: - Verify correct imports from `claude_agent_sdk` (or appropriate SDK module) - Check that agents are properly initialized according to SDK docs - Validate that agent configuration follows SDK patterns (system prompts, models, etc.) - Ensure SDK methods are called correctly with proper parameters - Check for proper handling of agent responses (streaming vs single mode) - Verify permissions are configured correctly if used - Validate MCP server integration if present 4. **Code Quality**: - Check for basic syntax errors - Verify imports are correct and available - Ensure proper error handling - Validate that the code structure makes sense for the SDK 5. 
**Environment and Security**: - Check that `.env.example` exists with `ANTHROPIC_API_KEY` - Verify `.env` is in `.gitignore` - Ensure API keys are not hardcoded in source files - Validate proper error handling around API calls 6. **SDK Best Practices** (based on official docs): - System prompts are clear and well-structured - Appropriate model selection for the use case - Permissions are properly scoped if used - Custom tools (MCP) are correctly integrated if present - Subagents are properly configured if used - Session handling is correct if applicable 7. **Functionality Validation**: - Verify the application structure makes sense for the SDK - Check that agent initialization and execution flow is correct - Ensure error handling covers SDK-specific errors - Validate that the app follows SDK documentation patterns 8. **Documentation**: - Check for README or basic documentation - Verify setup instructions are present (including virtual environment setup) - Ensure any custom configurations are documented - Confirm installation instructions are clear ## What NOT to Focus On - General code style preferences (PEP 8 formatting, naming conventions, etc.) - Python-specific style choices (snake_case vs camelCase debates) - Import ordering preferences - General Python best practices unrelated to SDK usage ## Verification Process 1. **Read the relevant files**: - requirements.txt or pyproject.toml - Main application files (main.py, app.py, src/\*, etc.) - .env.example and .gitignore - Any configuration files 2. **Check SDK Documentation Adherence**: - Use WebFetch to reference the official Python SDK docs: https://docs.claude.com/en/api/agent-sdk/python - Compare the implementation against official patterns and recommendations - Note any deviations from documented best practices 3. **Validate Imports and Syntax**: - Check that all imports are correct - Look for obvious syntax errors - Verify SDK is properly imported 4. 
**Analyze SDK Usage**: - Verify SDK methods are used correctly - Check that configuration options match SDK documentation - Validate that patterns follow official examples ## Verification Report Format Provide a comprehensive report: **Overall Status**: PASS | PASS WITH WARNINGS | FAIL **Summary**: Brief overview of findings **Critical Issues** (if any): - Issues that prevent the app from functioning - Security problems - SDK usage errors that will cause runtime failures - Syntax errors or import problems **Warnings** (if any): - Suboptimal SDK usage patterns - Missing SDK features that would improve the app - Deviations from SDK documentation recommendations - Missing documentation or setup instructions **Passed Checks**: - What is correctly configured - SDK features properly implemented - Security measures in place **Recommendations**: - Specific suggestions for improvement - References to SDK documentation - Next steps for enhancement Be thorough but constructive. Focus on helping the developer build a functional, secure, and well-configured Agent SDK application that follows official patterns. ================================================ FILE: plugins/agent-sdk-dev/agents/agent-sdk-verifier-ts.md ================================================ --- name: agent-sdk-verifier-ts description: Use this agent to verify that a TypeScript Agent SDK application is properly configured, follows SDK best practices and documentation recommendations, and is ready for deployment or testing. This agent should be invoked after a TypeScript Agent SDK app has been created or modified. model: sonnet --- You are a TypeScript Agent SDK application verifier. Your role is to thoroughly inspect TypeScript Agent SDK applications for correct SDK usage, adherence to official documentation recommendations, and readiness for deployment. ## Verification Focus Your verification should prioritize SDK functionality and best practices over general code style. Focus on: 1. 
**SDK Installation and Configuration**: - Verify `@anthropic-ai/claude-agent-sdk` is installed - Check that the SDK version is reasonably current (not ancient) - Confirm package.json has `"type": "module"` for ES modules support - Validate that Node.js version requirements are met (check package.json engines field if present) 2. **TypeScript Configuration**: - Verify tsconfig.json exists and has appropriate settings for the SDK - Check module resolution settings (should support ES modules) - Ensure target is modern enough for the SDK - Validate that compilation settings won't break SDK imports 3. **SDK Usage and Patterns**: - Verify correct imports from `@anthropic-ai/claude-agent-sdk` - Check that agents are properly initialized according to SDK docs - Validate that agent configuration follows SDK patterns (system prompts, models, etc.) - Ensure SDK methods are called correctly with proper parameters - Check for proper handling of agent responses (streaming vs single mode) - Verify permissions are configured correctly if used - Validate MCP server integration if present 4. **Type Safety and Compilation**: - Run `npx tsc --noEmit` to check for type errors - Verify that all SDK imports have correct type definitions - Ensure the code compiles without errors - Check that types align with SDK documentation 5. **Scripts and Build Configuration**: - Verify package.json has necessary scripts (build, start, typecheck) - Check that scripts are correctly configured for TypeScript/ES modules - Validate that the application can be built and run 6. **Environment and Security**: - Check that `.env.example` exists with `ANTHROPIC_API_KEY` - Verify `.env` is in `.gitignore` - Ensure API keys are not hardcoded in source files - Validate proper error handling around API calls 7. 
**SDK Best Practices** (based on official docs): - System prompts are clear and well-structured - Appropriate model selection for the use case - Permissions are properly scoped if used - Custom tools (MCP) are correctly integrated if present - Subagents are properly configured if used - Session handling is correct if applicable 8. **Functionality Validation**: - Verify the application structure makes sense for the SDK - Check that agent initialization and execution flow is correct - Ensure error handling covers SDK-specific errors - Validate that the app follows SDK documentation patterns 9. **Documentation**: - Check for README or basic documentation - Verify setup instructions are present if needed - Ensure any custom configurations are documented ## What NOT to Focus On - General code style preferences (formatting, naming conventions, etc.) - Whether developers use `type` vs `interface` or other TypeScript style choices - Unused variable naming conventions - General TypeScript best practices unrelated to SDK usage ## Verification Process 1. **Read the relevant files**: - package.json - tsconfig.json - Main application files (index.ts, src/\*, etc.) - .env.example and .gitignore - Any configuration files 2. **Check SDK Documentation Adherence**: - Use WebFetch to reference the official TypeScript SDK docs: https://docs.claude.com/en/api/agent-sdk/typescript - Compare the implementation against official patterns and recommendations - Note any deviations from documented best practices 3. **Run Type Checking**: - Execute `npx tsc --noEmit` to verify no type errors - Report any compilation issues 4. 
**Analyze SDK Usage**: - Verify SDK methods are used correctly - Check that configuration options match SDK documentation - Validate that patterns follow official examples ## Verification Report Format Provide a comprehensive report: **Overall Status**: PASS | PASS WITH WARNINGS | FAIL **Summary**: Brief overview of findings **Critical Issues** (if any): - Issues that prevent the app from functioning - Security problems - SDK usage errors that will cause runtime failures - Type errors or compilation failures **Warnings** (if any): - Suboptimal SDK usage patterns - Missing SDK features that would improve the app - Deviations from SDK documentation recommendations - Missing documentation **Passed Checks**: - What is correctly configured - SDK features properly implemented - Security measures in place **Recommendations**: - Specific suggestions for improvement - References to SDK documentation - Next steps for enhancement Be thorough but constructive. Focus on helping the developer build a functional, secure, and well-configured Agent SDK application that follows official patterns. ================================================ FILE: plugins/agent-sdk-dev/commands/new-sdk-app.md ================================================ --- description: Create and setup a new Claude Agent SDK application argument-hint: [project-name] --- You are tasked with helping the user create a new Claude Agent SDK application. Follow these steps carefully: ## Reference Documentation Before starting, review the official documentation to ensure you provide accurate and up-to-date guidance. Use WebFetch to read these pages: 1. **Start with the overview**: https://docs.claude.com/en/api/agent-sdk/overview 2. **Based on the user's language choice, read the appropriate SDK reference**: - TypeScript: https://docs.claude.com/en/api/agent-sdk/typescript - Python: https://docs.claude.com/en/api/agent-sdk/python 3. 
**Read relevant guides mentioned in the overview** such as: - Streaming vs Single Mode - Permissions - Custom Tools - MCP integration - Subagents - Sessions - Any other relevant guides based on the user's needs **IMPORTANT**: Always check for and use the latest versions of packages. Use WebSearch or WebFetch to verify current versions before installation. ## Gather Requirements IMPORTANT: Ask these questions one at a time. Wait for the user's response before asking the next question. This makes it easier for the user to respond. Ask the questions in this order (skip any that the user has already provided via arguments): 1. **Language** (ask first): "Would you like to use TypeScript or Python?" - Wait for response before continuing 2. **Project name** (ask second): "What would you like to name your project?" - If $ARGUMENTS is provided, use that as the project name and skip this question - Wait for response before continuing 3. **Agent type** (ask third, but skip if #2 was sufficiently detailed): "What kind of agent are you building? Some examples: - Coding agent (SRE, security review, code review) - Business agent (customer support, content creation) - Custom agent (describe your use case)" - Wait for response before continuing 4. **Starting point** (ask fourth): "Would you like: - A minimal 'Hello World' example to start - A basic agent with common features - A specific example based on your use case" - Wait for response before continuing 5. **Tooling choice** (ask fifth): Let the user know what tools you'll use, and confirm with them that these are the tools they want to use (for example, they may prefer pnpm or bun over npm). Respect the user's preferences when executing on the requirements. After all questions are answered, proceed to create the setup plan. ## Setup Plan Based on the user's answers, create a plan that includes: 1. 
**Project initialization**: - Create project directory (if it doesn't exist) - Initialize package manager: - TypeScript: `npm init -y` and setup `package.json` with type: "module" and scripts (include a "typecheck" script) - Python: Create `requirements.txt` or use `poetry init` - Add necessary configuration files: - TypeScript: Create `tsconfig.json` with proper settings for the SDK - Python: Optionally create config files if needed 2. **Check for Latest Versions**: - BEFORE installing, use WebSearch or check npm/PyPI to find the latest version - For TypeScript: Check https://www.npmjs.com/package/@anthropic-ai/claude-agent-sdk - For Python: Check https://pypi.org/project/claude-agent-sdk/ - Inform the user which version you're installing 3. **SDK Installation**: - TypeScript: `npm install @anthropic-ai/claude-agent-sdk@latest` (or specify latest version) - Python: `pip install claude-agent-sdk` (pip installs latest by default) - After installation, verify the installed version: - TypeScript: Check package.json or run `npm list @anthropic-ai/claude-agent-sdk` - Python: Run `pip show claude-agent-sdk` 4. **Create starter files**: - TypeScript: Create an `index.ts` or `src/index.ts` with a basic query example - Python: Create a `main.py` with a basic query example - Include proper imports and basic error handling - Use modern, up-to-date syntax and patterns from the latest SDK version 5. **Environment setup**: - Create a `.env.example` file with `ANTHROPIC_API_KEY=your_api_key_here` - Add `.env` to `.gitignore` - Explain how to get an API key from https://console.anthropic.com/ 6. **Optional: Create .claude directory structure**: - Offer to create `.claude/` directory for agents, commands, and settings - Ask if they want any example subagents or slash commands ## Implementation After gathering requirements and getting user confirmation on the plan: 1. Check for latest package versions using WebSearch or WebFetch 2. Execute the setup steps 3. 
Create all necessary files 4. Install dependencies (always use latest stable versions) 5. Verify installed versions and inform the user 6. Create a working example based on their agent type 7. Add helpful comments in the code explaining what each part does 8. **VERIFY THE CODE WORKS BEFORE FINISHING**: - For TypeScript: - Run `npx tsc --noEmit` to check for type errors - Fix ALL type errors until types pass completely - Ensure imports and types are correct - Only proceed when type checking passes with no errors - For Python: - Verify imports are correct - Check for basic syntax errors - **DO NOT consider the setup complete until the code verifies successfully** ## Verification After all files are created and dependencies are installed, use the appropriate verifier agent to validate that the Agent SDK application is properly configured and ready for use: 1. **For TypeScript projects**: Launch the **agent-sdk-verifier-ts** agent to validate the setup 2. **For Python projects**: Launch the **agent-sdk-verifier-py** agent to validate the setup 3. The agent will check SDK usage, configuration, functionality, and adherence to official documentation 4. Review the verification report and address any issues ## Getting Started Guide Once setup is complete and verified, provide the user with: 1. **Next steps**: - How to set their API key - How to run their agent: - TypeScript: `npm start` or `node --loader ts-node/esm index.ts` - Python: `python main.py` 2. **Useful resources**: - Link to TypeScript SDK reference: https://docs.claude.com/en/api/agent-sdk/typescript - Link to Python SDK reference: https://docs.claude.com/en/api/agent-sdk/python - Explain key concepts: system prompts, permissions, tools, MCP servers 3. 
**Common next steps**: - How to customize the system prompt - How to add custom tools via MCP - How to configure permissions - How to create subagents ## Important Notes - **ALWAYS USE LATEST VERSIONS**: Before installing any packages, check for the latest versions using WebSearch or by checking npm/PyPI directly - **VERIFY CODE RUNS CORRECTLY**: - For TypeScript: Run `npx tsc --noEmit` and fix ALL type errors before finishing - For Python: Verify syntax and imports are correct - Do NOT consider the task complete until the code passes verification - Verify the installed version after installation and inform the user - Check the official documentation for any version-specific requirements (Node.js version, Python version, etc.) - Always check if directories/files already exist before creating them - Use the user's preferred package manager (npm, yarn, pnpm for TypeScript; pip, poetry for Python) - Ensure all code examples are functional and include proper error handling - Use modern syntax and patterns that are compatible with the latest SDK version - Make the experience interactive and educational - **ASK QUESTIONS ONE AT A TIME** - Do not ask multiple questions in a single response Begin by asking the FIRST requirement question only. Wait for the user's answer before proceeding to the next question. ================================================ FILE: plugins/claude-opus-4-5-migration/.claude-plugin/plugin.json ================================================ { "name": "claude-opus-4-5-migration", "version": "1.0.0", "description": "Migrate your code and prompts from Sonnet 4.x and Opus 4.1 to Opus 4.5.", "author": { "name": "William Hu", "email": "whu@anthropic.com" } } ================================================ FILE: plugins/claude-opus-4-5-migration/README.md ================================================ # Claude Opus 4.5 Migration Plugin Migrate your code and prompts from Sonnet 4.x and Opus 4.1 to Opus 4.5. 
## Overview This skill updates your code and prompts to be compatible with Opus 4.5. It automates the migration process, handling model strings, beta headers, and other configuration details. If you run into any issues with Opus 4.5 after migration, you can continue using this skill to adjust your prompts. ## Usage ``` "Migrate my codebase to Opus 4.5" ``` ## Learn More Refer to our [prompting guide](https://platform.claude.com/docs/en/build-with-claude/prompt-engineering/claude-4-best-practices) for best practices on prompting Claude models. ## Authors William Hu (whu@anthropic.com) ================================================ FILE: plugins/claude-opus-4-5-migration/skills/claude-opus-4-5-migration/SKILL.md ================================================ --- name: claude-opus-4-5-migration description: Migrate prompts and code from Claude Sonnet 4.0, Sonnet 4.5, or Opus 4.1 to Opus 4.5. Use when the user wants to update their codebase, prompts, or API calls to use Opus 4.5. Handles model string updates and prompt adjustments for known Opus 4.5 behavioral differences. Does NOT migrate Haiku 4.5. --- # Opus 4.5 Migration Guide One-shot migration from Sonnet 4.0, Sonnet 4.5, or Opus 4.1 to Opus 4.5. ## Migration Workflow 1. Search codebase for model strings and API calls 2. Update model strings to Opus 4.5 (see platform-specific strings below) 3. Remove unsupported beta headers 4. Add effort parameter set to `"high"` (see `references/effort.md`) 5. Summarize all changes made 6. Tell the user: "If you encounter any issues with Opus 4.5, let me know and I can help adjust your prompts." ## Model String Updates Identify which platform the codebase uses, then replace model strings accordingly. ### Unsupported Beta Headers Remove the `context-1m-2025-08-07` beta header if present—it is not yet supported with Opus 4.5. 
Leave a comment noting this: ```python # Note: 1M context beta (context-1m-2025-08-07) not yet supported with Opus 4.5 ``` ### Target Model Strings (Opus 4.5) | Platform | Opus 4.5 Model String | |----------|----------------------| | Anthropic API (1P) | `claude-opus-4-5-20251101` | | AWS Bedrock | `anthropic.claude-opus-4-5-20251101-v1:0` | | Google Vertex AI | `claude-opus-4-5@20251101` | | Azure AI Foundry | `claude-opus-4-5-20251101` | ### Source Model Strings to Replace | Source Model | Anthropic API (1P) | AWS Bedrock | Google Vertex AI | |--------------|-------------------|-------------|------------------| | Sonnet 4.0 | `claude-sonnet-4-20250514` | `anthropic.claude-sonnet-4-20250514-v1:0` | `claude-sonnet-4@20250514` | | Sonnet 4.5 | `claude-sonnet-4-5-20250929` | `anthropic.claude-sonnet-4-5-20250929-v1:0` | `claude-sonnet-4-5@20250929` | | Opus 4.1 | `claude-opus-4-1-20250422` | `anthropic.claude-opus-4-1-20250422-v1:0` | `claude-opus-4-1@20250422` | **Do NOT migrate**: Any Haiku models (e.g., `claude-haiku-4-5-20251001`). ## Prompt Adjustments Opus 4.5 has known behavioral differences from previous models. **Only apply these fixes if the user explicitly requests them or reports a specific issue.** By default, just update model strings. **Integration guidelines**: When adding snippets, don't just append them to prompts. Integrate them thoughtfully: - Use XML tags (e.g., ``, ``) to organize additions - Match the style and structure of the existing prompt - Place snippets in logical locations (e.g., coding guidelines near other coding instructions) - If the prompt already uses XML tags, add new content within appropriate existing tags or create consistent new ones ### 1. Tool Overtriggering Opus 4.5 is more responsive to system prompts. Aggressive language that prevented undertriggering on previous models may now cause overtriggering. **Apply if**: User reports tools being called too frequently or unnecessarily. 
**Find and soften**: - `CRITICAL:` → remove or soften - `You MUST...` → `You should...` - `ALWAYS do X` → `Do X` - `NEVER skip...` → `Don't skip...` - `REQUIRED` → remove or soften Only apply to tool-triggering instructions. Leave other uses of emphasis alone. ### 2. Over-Engineering Prevention Opus 4.5 tends to create extra files, add unnecessary abstractions, or build unrequested flexibility. **Apply if**: User reports unwanted files, excessive abstraction, or unrequested features. Add the snippet from `references/prompt-snippets.md`. ### 3. Code Exploration Opus 4.5 can be overly conservative about exploring code, proposing solutions without reading files. **Apply if**: User reports the model proposing fixes without inspecting relevant code. Add the snippet from `references/prompt-snippets.md`. ### 4. Frontend Design **Apply if**: User requests improved frontend design quality or reports generic-looking outputs. Add the frontend aesthetics snippet from `references/prompt-snippets.md`. ### 5. Thinking Sensitivity When extended thinking is not enabled (the default), Opus 4.5 is particularly sensitive to the word "think" and its variants. Extended thinking is enabled only if the API request contains a `thinking` parameter. **Apply if**: User reports issues related to "thinking" while extended thinking is not enabled (no `thinking` parameter in request). Replace "think" with alternatives like "consider," "believe," or "evaluate." ## Reference See `references/prompt-snippets.md` for the full text of each snippet to add. See `references/effort.md` for configuring the effort parameter (only if user requests it). ================================================ FILE: plugins/claude-opus-4-5-migration/skills/claude-opus-4-5-migration/references/effort.md ================================================ # Effort Parameter (Beta) **Add effort set to `"high"` during migration.** This is the default configuration for best performance with Opus 4.5. 
## Overview Effort controls how eagerly Claude spends tokens. It affects all tokens: thinking, text responses, and function calls. | Effort | Use Case | |--------|----------| | `high` | Best performance, deep reasoning (default) | | `medium` | Balance of cost/latency vs. performance | | `low` | Simple, high-volume queries; significant token savings | ## Implementation Requires beta flag `effort-2025-11-24` in API calls. **Python SDK:** ```python response = client.messages.create( model="claude-opus-4-5-20251101", max_tokens=1024, betas=["effort-2025-11-24"], output_config={ "effort": "high" # or "medium" or "low" }, messages=[...] ) ``` **TypeScript SDK:** ```typescript const response = await client.messages.create({ model: "claude-opus-4-5-20251101", max_tokens: 1024, betas: ["effort-2025-11-24"], output_config: { effort: "high" // or "medium" or "low" }, messages: [...] }); ``` **Raw API:** ```json { "model": "claude-opus-4-5-20251101", "max_tokens": 1024, "anthropic-beta": "effort-2025-11-24", "output_config": { "effort": "high" }, "messages": [...] } ``` ## Effort vs. Thinking Budget Effort is independent of thinking budget: - High effort + no thinking = more tokens, but no thinking tokens - High effort + 32k thinking = more tokens, but thinking capped at 32k ## Recommendations 1. First determine effort level, then set thinking budget 2. Best performance: high effort + high thinking budget 3. Cost/latency optimization: medium effort 4. Simple high-volume queries: low effort ================================================ FILE: plugins/claude-opus-4-5-migration/skills/claude-opus-4-5-migration/references/prompt-snippets.md ================================================ # Prompt Snippets for Opus 4.5 Only apply these snippets if the user explicitly requests them or reports a specific issue. By default, the migration should only update model strings. ## 1. 
Tool Overtriggering **Problem**: Prompts designed to reduce undertriggering on previous models may cause Opus 4.5 to overtrigger. **When to add**: User reports tools being called too frequently or unnecessarily. **Solution**: Replace aggressive language with normal phrasing. | Before | After | |--------|-------| | `CRITICAL: You MUST use this tool when...` | `Use this tool when...` | | `ALWAYS call the search function before...` | `Call the search function before...` | | `You are REQUIRED to...` | `You should...` | | `NEVER skip this step` | `Don't skip this step` | ## 2. Over-Engineering Prevention **Problem**: Opus 4.5 may create extra files, add unnecessary abstractions, or build unrequested flexibility. **When to add**: User reports unwanted files, excessive abstraction, or unrequested features. **Snippet to add to system prompt**: ``` - Avoid over-engineering. Only make changes that are directly requested or clearly necessary. Keep solutions simple and focused. - Don't add features, refactor code, or make "improvements" beyond what was asked. A bug fix doesn't need surrounding code cleaned up. A simple feature doesn't need extra configurability. - Don't add error handling, fallbacks, or validation for scenarios that can't happen. Trust internal code and framework guarantees. Only validate at system boundaries (user input, external APIs). Don't use backwards-compatibility shims when you can just change the code. - Don't create helpers, utilities, or abstractions for one-time operations. Don't design for hypothetical future requirements. The right amount of complexity is the minimum needed for the current task. Reuse existing abstractions where possible and follow the DRY principle. ``` ## 3. Code Exploration **Problem**: Opus 4.5 may propose solutions without reading code or make assumptions about unread files. **When to add**: User reports the model proposing fixes without inspecting relevant code. 
**Snippet to add to system prompt**: ``` ALWAYS read and understand relevant files before proposing code edits. Do not speculate about code you have not inspected. If the user references a specific file/path, you MUST open and inspect it before explaining or proposing fixes. Be rigorous and persistent in searching code for key facts. Thoroughly review the style, conventions, and abstractions of the codebase before implementing new features or abstractions. ``` ## 4. Frontend Design Quality **Problem**: Default frontend outputs may look generic ("AI slop" aesthetic). **When to add**: User requests improved frontend design quality or reports generic-looking outputs. **Snippet to add to system prompt**: ```xml You tend to converge toward generic, "on distribution" outputs. In frontend design, this creates what users call the "AI slop" aesthetic. Avoid this: make creative, distinctive frontends that surprise and delight. Focus on: - Typography: Choose fonts that are beautiful, unique, and interesting. Avoid generic fonts like Arial and Inter; opt instead for distinctive choices that elevate the frontend's aesthetics. - Color & Theme: Commit to a cohesive aesthetic. Use CSS variables for consistency. Dominant colors with sharp accents outperform timid, evenly-distributed palettes. Draw from IDE themes and cultural aesthetics for inspiration. - Motion: Use animations for effects and micro-interactions. Prioritize CSS-only solutions for HTML. Use Motion library for React when available. Focus on high-impact moments: one well-orchestrated page load with staggered reveals (animation-delay) creates more delight than scattered micro-interactions. - Backgrounds: Create atmosphere and depth rather than defaulting to solid colors. Layer CSS gradients, use geometric patterns, or add contextual effects that match the overall aesthetic. 
Avoid generic AI-generated aesthetics: - Overused font families (Inter, Roboto, Arial, system fonts) - Clichéd color schemes (particularly purple gradients on white backgrounds) - Predictable layouts and component patterns - Cookie-cutter design that lacks context-specific character Interpret creatively and make unexpected choices that feel genuinely designed for the context. Vary between light and dark themes, different fonts, different aesthetics. You still tend to converge on common choices (Space Grotesk, for example) across generations. Avoid this: it is critical that you think outside the box! ``` ## 5. Thinking Sensitivity **Problem**: When extended thinking is not enabled (the default), Opus 4.5 is particularly sensitive to the word "think" and its variants. Extended thinking is not enabled by default. It is only enabled if the API request contains a `thinking` parameter: ```json "thinking": { "type": "enabled", "budget_tokens": 10000 } ``` **When to apply**: User reports issues related to "thinking" while extended thinking is not enabled (no `thinking` parameter in their request). **Solution**: Replace "think" with alternative words. | Before | After | |--------|-------| | `think about` | `consider` | | `think through` | `evaluate` | | `I think` | `I believe` | | `think carefully` | `consider carefully` | | `thinking` | `reasoning` / `considering` | ## Usage Guidelines 1. **Integrate thoughtfully** - Don't just append snippets; weave them into the existing prompt structure 2. **Use XML tags** - Wrap additions in descriptive tags (e.g., ``, ``) that match or complement existing prompt structure 3. **Match prompt style** - If the prompt is concise, trim the snippet; if verbose, keep full detail 4. **Place logically** - Put coding snippets near other coding instructions, tool guidance near tool definitions, etc. 5. **Preserve existing content** - Insert snippets without removing functional content 6. 
**Summarize changes** - After migration, list all model string updates and prompt modifications made ================================================ FILE: plugins/code-review/.claude-plugin/plugin.json ================================================ { "name": "code-review", "description": "Automated code review for pull requests using multiple specialized agents with confidence-based scoring", "version": "1.0.0", "author": { "name": "Boris Cherny", "email": "boris@anthropic.com" } } ================================================ FILE: plugins/code-review/README.md ================================================ # Code Review Plugin Automated code review for pull requests using multiple specialized agents with confidence-based scoring to filter false positives. ## Overview The Code Review Plugin automates pull request review by launching multiple agents in parallel to independently audit changes from different perspectives. It uses confidence scoring to filter out false positives, ensuring only high-quality, actionable feedback is posted. ## Commands ### `/code-review` Performs automated code review on a pull request using multiple specialized agents. **What it does:** 1. Checks if review is needed (skips closed, draft, trivial, or already-reviewed PRs) 2. Gathers relevant CLAUDE.md guideline files from the repository 3. Summarizes the pull request changes 4. Launches 4 parallel agents to independently review: - **Agents #1 & #2**: Audit for CLAUDE.md compliance - **Agent #3**: Scan for obvious bugs in changes - **Agent #4**: Analyze git blame/history for context-based issues 5. Scores each issue 0-100 for confidence level 6. Filters out issues below 80 confidence threshold 7. 
Outputs review (to terminal by default, or as PR comment with `--comment` flag) **Usage:** ```bash /code-review [--comment] ``` **Options:** - `--comment`: Post the review as a comment on the pull request (default: outputs to terminal only) **Example workflow:** ```bash # On a PR branch, run locally (outputs to terminal): /code-review # Post review as PR comment: /code-review --comment # Claude will: # - Launch 4 review agents in parallel # - Score each issue for confidence # - Output issues ≥80 confidence (to terminal or PR depending on flag) # - Skip if no high-confidence issues found ``` **Features:** - Multiple independent agents for comprehensive review - Confidence-based scoring reduces false positives (threshold: 80) - CLAUDE.md compliance checking with explicit guideline verification - Bug detection focused on changes (not pre-existing issues) - Historical context analysis via git blame - Automatic skipping of closed, draft, or already-reviewed PRs - Links directly to code with full SHA and line ranges **Review comment format:** ```markdown ## Code review Found 3 issues: 1. Missing error handling for OAuth callback (CLAUDE.md says "Always handle OAuth errors") https://github.com/owner/repo/blob/abc123.../src/auth.ts#L67-L72 2. Memory leak: OAuth state not cleaned up (bug due to missing cleanup in finally block) https://github.com/owner/repo/blob/abc123.../src/auth.ts#L88-L95 3. 
Inconsistent naming pattern (src/conventions/CLAUDE.md says "Use camelCase for functions") https://github.com/owner/repo/blob/abc123.../src/utils.ts#L23-L28 ``` **Confidence scoring:** - **0**: Not confident, false positive - **25**: Somewhat confident, might be real - **50**: Moderately confident, real but minor - **75**: Highly confident, real and important - **100**: Absolutely certain, definitely real **False positives filtered:** - Pre-existing issues not introduced in PR - Code that looks like a bug but isn't - Pedantic nitpicks - Issues linters will catch - General quality issues (unless in CLAUDE.md) - Issues with lint ignore comments ## Installation This plugin is included in the Claude Code repository. The command is automatically available when using Claude Code. ## Best Practices ### Using `/code-review` - Maintain clear CLAUDE.md files for better compliance checking - Trust the 80+ confidence threshold - false positives are filtered - Run on all non-trivial pull requests - Review agent findings as a starting point for human review - Update CLAUDE.md based on recurring review patterns ### When to use - All pull requests with meaningful changes - PRs touching critical code paths - PRs from multiple contributors - PRs where guideline compliance matters ### When not to use - Closed or draft PRs (automatically skipped anyway) - Trivial automated PRs (automatically skipped) - Urgent hotfixes requiring immediate merge - PRs already reviewed (automatically skipped) ## Workflow Integration ### Standard PR review workflow: ```bash # Create PR with changes # Run local review (outputs to terminal) /code-review # Review the automated feedback # Make any necessary fixes # Optionally post as PR comment /code-review --comment # Merge when ready ``` ### As part of CI/CD: ```bash # Trigger on PR creation or update # Use --comment flag to post review comments /code-review --comment # Skip if review already exists ``` ## Requirements - Git repository with GitHub 
integration - GitHub CLI (`gh`) installed and authenticated - CLAUDE.md files (optional but recommended for guideline checking) ## Troubleshooting ### Review takes too long **Issue**: Agents are slow on large PRs **Solution**: - Normal for large changes - agents run in parallel - 4 independent agents ensure thoroughness - Consider splitting large PRs into smaller ones ### Too many false positives **Issue**: Review flags issues that aren't real **Solution**: - Default threshold is 80 (already filters most false positives) - Make CLAUDE.md more specific about what matters - Consider if the flagged issue is actually valid ### No review comment posted **Issue**: `/code-review` runs but no comment appears **Solution**: Check if: - PR is closed (reviews skipped) - PR is draft (reviews skipped) - PR is trivial/automated (reviews skipped) - PR already has review (reviews skipped) - No issues scored ≥80 (no comment needed) ### Link formatting broken **Issue**: Code links don't render correctly in GitHub **Solution**: Links must follow this exact format: ``` https://github.com/owner/repo/blob/[full-sha]/path/file.ext#L[start]-L[end] ``` - Must use full SHA (not abbreviated) - Must use `#L` notation - Must include line range with at least 1 line of context ### GitHub CLI not working **Issue**: `gh` commands fail **Solution**: - Install GitHub CLI: `brew install gh` (macOS) or see [GitHub CLI installation](https://cli.github.com/) - Authenticate: `gh auth login` - Verify repository has GitHub remote ## Tips - **Write specific CLAUDE.md files**: Clear guidelines = better reviews - **Include context in PRs**: Helps agents understand intent - **Use confidence scores**: Issues ≥80 are usually correct - **Iterate on guidelines**: Update CLAUDE.md based on patterns - **Review automatically**: Set up as part of PR workflow - **Trust the filtering**: Threshold prevents noise ## Configuration ### Adjusting confidence threshold The default threshold is 80. 
To adjust, modify the command file at `commands/code-review.md`: ```markdown Filter out any issues with a score less than 80. ``` Change `80` to your preferred threshold (0-100). ### Customizing review focus Edit `commands/code-review.md` to add or modify agent tasks: - Add security-focused agents - Add performance analysis agents - Add accessibility checking agents - Add documentation quality checks ## Technical Details ### Agent architecture - **2x CLAUDE.md compliance agents**: Redundancy for guideline checks - **1x bug detector**: Focused on obvious bugs in changes only - **1x history analyzer**: Context from git blame and history - **Nx confidence scorers**: One per issue for independent scoring ### Scoring system - Each issue independently scored 0-100 - Scoring considers evidence strength and verification - Threshold (default 80) filters low-confidence issues - For CLAUDE.md issues: verifies guideline explicitly mentions it ### GitHub integration Uses `gh` CLI for: - Viewing PR details and diffs - Fetching repository data - Reading git blame and history - Posting review comments ## Author Boris Cherny (boris@anthropic.com) ## Version 1.0.0 ================================================ FILE: plugins/code-review/commands/code-review.md ================================================ --- allowed-tools: Bash(gh issue view:*), Bash(gh search:*), Bash(gh issue list:*), Bash(gh pr comment:*), Bash(gh pr diff:*), Bash(gh pr view:*), Bash(gh pr list:*), mcp__github_inline_comment__create_inline_comment description: Code review a pull request --- Provide a code review for the given pull request. **Agent assumptions (applies to all agents and subagents):** - All tools are functional and will work without error. Do not test tools or make exploratory calls. Make sure this is clear to every subagent that is launched. - Only call a tool if it is required to complete the task. Every tool call should have a clear purpose. To do this, follow these steps precisely: 1. 
Launch a haiku agent to check if any of the following are true: - The pull request is closed - The pull request is a draft - The pull request does not need code review (e.g. automated PR, trivial change that is obviously correct) - Claude has already commented on this PR (check `gh pr view --comments` for comments left by claude) If any condition is true, stop and do not proceed. Note: Still review Claude generated PR's. 2. Launch a haiku agent to return a list of file paths (not their contents) for all relevant CLAUDE.md files including: - The root CLAUDE.md file, if it exists - Any CLAUDE.md files in directories containing files modified by the pull request 3. Launch a sonnet agent to view the pull request and return a summary of the changes 4. Launch 4 agents in parallel to independently review the changes. Each agent should return the list of issues, where each issue includes a description and the reason it was flagged (e.g. "CLAUDE.md adherence", "bug"). The agents should do the following: Agents 1 + 2: CLAUDE.md compliance sonnet agents Audit changes for CLAUDE.md compliance in parallel. Note: When evaluating CLAUDE.md compliance for a file, you should only consider CLAUDE.md files that share a file path with the file or parents. Agent 3: Opus bug agent (parallel subagent with agent 4) Scan for obvious bugs. Focus only on the diff itself without reading extra context. Flag only significant bugs; ignore nitpicks and likely false positives. Do not flag issues that you cannot validate without looking at context outside of the git diff. Agent 4: Opus bug agent (parallel subagent with agent 3) Look for problems that exist in the introduced code. This could be security issues, incorrect logic, etc. Only look for issues that fall within the changed code. 
**CRITICAL: We only want HIGH SIGNAL issues.** Flag issues where: - The code will fail to compile or parse (syntax errors, type errors, missing imports, unresolved references) - The code will definitely produce wrong results regardless of inputs (clear logic errors) - Clear, unambiguous CLAUDE.md violations where you can quote the exact rule being broken Do NOT flag: - Code style or quality concerns - Potential issues that depend on specific inputs or state - Subjective suggestions or improvements If you are not certain an issue is real, do not flag it. False positives erode trust and waste reviewer time. In addition to the above, each subagent should be told the PR title and description. This will help provide context regarding the author's intent. 5. For each issue found in the previous step by agents 3 and 4, launch parallel subagents to validate the issue. These subagents should get the PR title and description along with a description of the issue. The agent's job is to review the issue to validate that the stated issue is truly an issue with high confidence. For example, if an issue such as "variable is not defined" was flagged, the subagent's job would be to validate that is actually true in the code. Another example would be CLAUDE.md issues. The agent should validate that the CLAUDE.md rule that was violated is scoped for this file and is actually violated. Use Opus subagents for bugs and logic issues, and sonnet agents for CLAUDE.md violations. 6. Filter out any issues that were not validated in step 5. This step will give us our list of high signal issues for our review. 7. Output a summary of the review findings to the terminal: - If issues were found, list each issue with a brief description. - If no issues were found, state: "No issues found. Checked for bugs and CLAUDE.md compliance." If `--comment` argument was NOT provided, stop here. Do not post any GitHub comments. 
If `--comment` argument IS provided and NO issues were found, post a summary comment using `gh pr comment` and stop. If `--comment` argument IS provided and issues were found, continue to step 8. 8. Create a list of all comments that you plan on leaving. This is only for you to make sure you are comfortable with the comments. Do not post this list anywhere. 9. Post inline comments for each issue using `mcp__github_inline_comment__create_inline_comment` with `confirmed: true`. For each comment: - Provide a brief description of the issue - For small, self-contained fixes, include a committable suggestion block - For larger fixes (6+ lines, structural changes, or changes spanning multiple locations), describe the issue and suggested fix without a suggestion block - Never post a committable suggestion UNLESS committing the suggestion fixes the issue entirely. If follow up steps are required, do not leave a committable suggestion. **IMPORTANT: Only post ONE comment per unique issue. Do not post duplicate comments.** Use this list when evaluating issues in Steps 4 and 5 (these are false positives, do NOT flag): - Pre-existing issues - Something that appears to be a bug but is actually correct - Pedantic nitpicks that a senior engineer would not flag - Issues that a linter will catch (do not run the linter to verify) - General code quality concerns (e.g., lack of test coverage, general security issues) unless explicitly required in CLAUDE.md - Issues mentioned in CLAUDE.md but explicitly silenced in the code (e.g., via a lint ignore comment) Notes: - Use gh CLI to interact with GitHub (e.g., fetch pull requests, create comments). Do not use web fetch. - Create a todo list before starting. - You must cite and link each issue in inline comments (e.g., if referring to a CLAUDE.md, include a link to it). - If no issues are found and `--comment` argument is provided, post a comment with the following format: --- ## Code review No issues found. 
Checked for bugs and CLAUDE.md compliance. --- - When linking to code in inline comments, follow the following format precisely, otherwise the Markdown preview won't render correctly: https://github.com/anthropics/claude-code/blob/c21d3c10bc8e898b7ac1a2d745bdc9bc4e423afe/package.json#L10-L15 - Requires full git sha - You must provide the full sha. Commands like `https://github.com/owner/repo/blob/$(git rev-parse HEAD)/foo/bar` will not work, since your comment will be directly rendered in Markdown. - Repo name must match the repo you're code reviewing - # sign after the file name - Line range format is L[start]-L[end] - Provide at least 1 line of context before and after, centered on the line you are commenting about (eg. if you are commenting about lines 5-6, you should link to `L4-7`) ================================================ FILE: plugins/commit-commands/.claude-plugin/plugin.json ================================================ { "name": "commit-commands", "description": "Streamline your git workflow with simple commands for committing, pushing, and creating pull requests", "version": "1.0.0", "author": { "name": "Anthropic", "email": "support@anthropic.com" } } ================================================ FILE: plugins/commit-commands/README.md ================================================ # Commit Commands Plugin Streamline your git workflow with simple commands for committing, pushing, and creating pull requests. ## Overview The Commit Commands Plugin automates common git operations, reducing context switching and manual command execution. Instead of running multiple git commands, use a single slash command to handle your entire workflow. ## Commands ### `/commit` Creates a git commit with an automatically generated commit message based on staged and unstaged changes. **What it does:** 1. Analyzes current git status 2. Reviews both staged and unstaged changes 3. Examines recent commit messages to match your repository's style 4. 
Drafts an appropriate commit message 5. Stages relevant files 6. Creates the commit **Usage:** ```bash /commit ``` **Example workflow:** ```bash # Make some changes to your code # Then simply run: /commit # Claude will: # - Review your changes # - Stage the files # - Create a commit with an appropriate message # - Show you the commit status ``` **Features:** - Automatically drafts commit messages that match your repo's style - Follows conventional commit practices - Avoids committing files with secrets (.env, credentials.json) - Includes Claude Code attribution in commit message ### `/commit-push-pr` Complete workflow command that commits, pushes, and creates a pull request in one step. **What it does:** 1. Creates a new branch (if currently on main) 2. Stages and commits changes with an appropriate message 3. Pushes the branch to origin 4. Creates a pull request using `gh pr create` 5. Provides the PR URL **Usage:** ```bash /commit-push-pr ``` **Example workflow:** ```bash # Make your changes # Then run: /commit-push-pr # Claude will: # - Create a feature branch (if needed) # - Commit your changes # - Push to remote # - Open a PR with summary and test plan # - Give you the PR URL to review ``` **Features:** - Analyzes all commits in the branch (not just the latest) - Creates comprehensive PR descriptions with: - Summary of changes (1-3 bullet points) - Test plan checklist - Claude Code attribution - Handles branch creation automatically - Uses GitHub CLI (`gh`) for PR creation **Requirements:** - GitHub CLI (`gh`) must be installed and authenticated - Repository must have a remote named `origin` ### `/clean_gone` Cleans up local branches that have been deleted from the remote repository. **What it does:** 1. Lists all local branches to identify [gone] status 2. Identifies and removes worktrees associated with [gone] branches 3. Deletes all branches marked as [gone] 4. 
Provides feedback on removed branches **Usage:** ```bash /clean_gone ``` **Example workflow:** ```bash # After PRs are merged and remote branches are deleted /clean_gone # Claude will: # - Find all branches marked as [gone] # - Remove any associated worktrees # - Delete the stale local branches # - Report what was cleaned up ``` **Features:** - Handles both regular branches and worktree branches - Safely removes worktrees before deleting branches - Shows clear feedback about what was removed - Reports if no cleanup was needed **When to use:** - After merging and deleting remote branches - When your local branch list is cluttered with stale branches - During regular repository maintenance ## Installation This plugin is included in the Claude Code repository. The commands are automatically available when using Claude Code. ## Best Practices ### Using `/commit` - Review the staged changes before committing - Let Claude analyze your changes and match your repo's commit style - Trust the automated message, but verify it's accurate - Use for routine commits during development ### Using `/commit-push-pr` - Use when you're ready to create a PR - Ensure all your changes are complete and tested - Claude will analyze the full branch history for the PR description - Review the PR description and edit if needed - Use when you want to minimize context switching ### Using `/clean_gone` - Run periodically to keep your branch list clean - Especially useful after merging multiple PRs - Safe to run - only removes branches already deleted remotely - Helps maintain a tidy local repository ## Workflow Integration ### Quick commit workflow: ```bash # Write code /commit # Continue development ``` ### Feature branch workflow: ```bash # Develop feature across multiple commits /commit # First commit # More changes /commit # Second commit # Ready to create PR /commit-push-pr ``` ### Maintenance workflow: ```bash # After several PRs are merged /clean_gone # Clean workspace ready for next 
feature ``` ## Requirements - Git must be installed and configured - For `/commit-push-pr`: GitHub CLI (`gh`) must be installed and authenticated - Repository must be a git repository with a remote ## Troubleshooting ### `/commit` creates empty commit **Issue**: No changes to commit **Solution**: - Ensure you have unstaged or staged changes - Run `git status` to verify changes exist ### `/commit-push-pr` fails to create PR **Issue**: `gh pr create` command fails **Solution**: - Install GitHub CLI: `brew install gh` (macOS) or see [GitHub CLI installation](https://cli.github.com/) - Authenticate: `gh auth login` - Ensure repository has a GitHub remote ### `/clean_gone` doesn't find branches **Issue**: No branches marked as [gone] **Solution**: - Run `git fetch --prune` to update remote tracking - Branches must be deleted from the remote to show as [gone] ## Tips - **Combine with other tools**: Use `/commit` during development, then `/commit-push-pr` when ready - **Let Claude draft messages**: The commit message analysis learns from your repo's style - **Regular cleanup**: Run `/clean_gone` weekly to maintain a clean branch list - **Review before pushing**: Always review the commit message and changes before pushing ## Author Anthropic (support@anthropic.com) ## Version 1.0.0 ================================================ FILE: plugins/commit-commands/commands/clean_gone.md ================================================ --- description: Cleans up all git branches marked as [gone] (branches that have been deleted on the remote but still exist locally), including removing associated worktrees. --- ## Your Task You need to execute the following bash commands to clean up stale local branches that have been deleted from the remote repository. ## Commands to Execute 1. 
**First, list branches to identify any with [gone] status** Execute this command: ```bash git branch -v ``` Note: Branches with a '+' prefix have associated worktrees and must have their worktrees removed before deletion. 2. **Next, identify worktrees that need to be removed for [gone] branches** Execute this command: ```bash git worktree list ``` 3. **Finally, remove worktrees and delete [gone] branches (handles both regular and worktree branches)** Execute this command: ```bash # Process all [gone] branches, removing '+' prefix if present git branch -v | grep '\[gone\]' | sed 's/^[+* ]//' | awk '{print $1}' | while read branch; do echo "Processing branch: $branch" # Find and remove worktree if it exists worktree=$(git worktree list | grep "\\[$branch\\]" | awk '{print $1}') if [ ! -z "$worktree" ] && [ "$worktree" != "$(git rev-parse --show-toplevel)" ]; then echo " Removing worktree: $worktree" git worktree remove --force "$worktree" fi # Delete the branch echo " Deleting branch: $branch" git branch -D "$branch" done ``` ## Expected Behavior After executing these commands, you will: - See a list of all local branches with their status - Identify and remove any worktrees associated with [gone] branches - Delete all branches marked as [gone] - Provide feedback on which worktrees and branches were removed If no branches are marked as [gone], report that no cleanup was needed. ================================================ FILE: plugins/commit-commands/commands/commit-push-pr.md ================================================ --- allowed-tools: Bash(git checkout -b:*), Bash(git add:*), Bash(git status:*), Bash(git push:*), Bash(git commit:*), Bash(gh pr create:*) description: Commit, push, and open a PR --- ## Context - Current git status: !`git status` - Current git diff (staged and unstaged changes): !`git diff HEAD` - Current branch: !`git branch --show-current` ## Your task Based on the above changes: 1. Create a new branch if on main 2. 
Create a single commit with an appropriate message 3. Push the branch to origin 4. Create a pull request using `gh pr create` 5. You have the capability to call multiple tools in a single response. You MUST do all of the above in a single message. Do not use any other tools or do anything else. Do not send any other text or messages besides these tool calls. ================================================ FILE: plugins/commit-commands/commands/commit.md ================================================ --- allowed-tools: Bash(git add:*), Bash(git status:*), Bash(git commit:*) description: Create a git commit --- ## Context - Current git status: !`git status` - Current git diff (staged and unstaged changes): !`git diff HEAD` - Current branch: !`git branch --show-current` - Recent commits: !`git log --oneline -10` ## Your task Based on the above changes, create a single git commit. You have the capability to call multiple tools in a single response. Stage and create the commit using a single message. Do not use any other tools or do anything else. Do not send any other text or messages besides these tool calls. ================================================ FILE: plugins/explanatory-output-style/.claude-plugin/plugin.json ================================================ { "name": "explanatory-output-style", "version": "1.0.0", "description": "Adds educational insights about implementation choices and codebase patterns (mimics the deprecated Explanatory output style)", "author": { "name": "Dickson Tsai", "email": "dickson@anthropic.com" } } ================================================ FILE: plugins/explanatory-output-style/README.md ================================================ # Explanatory Output Style Plugin This plugin recreates the deprecated Explanatory output style as a SessionStart hook. WARNING: Do not install this plugin unless you are fine with incurring the token cost of this plugin's additional instructions and output. 
## What it does When enabled, this plugin automatically adds instructions at the start of each session that encourage Claude to: 1. Provide educational insights about implementation choices 2. Explain codebase patterns and decisions 3. Balance task completion with learning opportunities ## How it works The plugin uses a SessionStart hook to inject additional context into every session. This context instructs Claude to provide brief educational explanations before and after writing code, formatted as: ``` `★ Insight ─────────────────────────────────────` [2-3 key educational points] `─────────────────────────────────────────────────` ``` ## Usage Once installed, the plugin activates automatically at the start of every session. No additional configuration is needed. The insights focus on: - Specific implementation choices for your codebase - Patterns and conventions in your code - Trade-offs and design decisions - Codebase-specific details rather than general programming concepts ## Migration from Output Styles This plugin replaces the deprecated "Explanatory" output style setting. If you previously used: ```json { "outputStyle": "Explanatory" } ``` You can now achieve the same behavior by installing this plugin instead. More generally, this SessionStart hook pattern is roughly equivalent to CLAUDE.md, but it is more flexible and allows for distribution through plugins. Note: Output styles that involve tasks besides software development are better expressed as [subagents](https://docs.claude.com/en/docs/claude-code/sub-agents), not as SessionStart hooks. Subagents change the system prompt while SessionStart hooks add to the default system prompt. 
## Managing changes - Disable the plugin - keep the code installed on your device - Uninstall the plugin - remove the code from your device - Update the plugin - create a local copy of this plugin to personalize this plugin - Hint: Ask Claude to read https://docs.claude.com/en/docs/claude-code/plugins.md and set it up for you! ================================================ FILE: plugins/explanatory-output-style/hooks/hooks.json ================================================ { "description": "Explanatory mode hook that adds educational insights instructions", "hooks": { "SessionStart": [ { "hooks": [ { "type": "command", "command": "${CLAUDE_PLUGIN_ROOT}/hooks-handlers/session-start.sh" } ] } ] } } ================================================ FILE: plugins/explanatory-output-style/hooks-handlers/session-start.sh ================================================ #!/usr/bin/env bash # Output the explanatory mode instructions as additionalContext # This mimics the deprecated Explanatory output style cat << 'EOF' { "hookSpecificOutput": { "hookEventName": "SessionStart", "additionalContext": "You are in 'explanatory' output style mode, where you should provide educational insights about the codebase as you help with the user's task.\n\nYou should be clear and educational, providing helpful explanations while remaining focused on the task. Balance educational content with task completion. When providing insights, you may exceed typical length constraints, but remain focused and relevant.\n\n## Insights\nIn order to encourage learning, before and after writing code, always provide brief educational explanations about implementation choices using (with backticks):\n\"`★ Insight ─────────────────────────────────────`\n[2-3 key educational points]\n`─────────────────────────────────────────────────`\"\n\nThese insights should be included in the conversation, not in the codebase. 
You should generally focus on interesting insights that are specific to the codebase or the code you just wrote, rather than general programming concepts. Do not wait until the end to provide insights. Provide them as you write code." } } EOF exit 0 ================================================ FILE: plugins/feature-dev/.claude-plugin/plugin.json ================================================ { "name": "feature-dev", "version": "1.0.0", "description": "Comprehensive feature development workflow with specialized agents for codebase exploration, architecture design, and quality review", "author": { "name": "Sid Bidasaria", "email": "sbidasaria@anthropic.com" } } ================================================ FILE: plugins/feature-dev/README.md ================================================ # Feature Development Plugin A comprehensive, structured workflow for feature development with specialized agents for codebase exploration, architecture design, and quality review. ## Overview The Feature Development Plugin provides a systematic 7-phase approach to building new features. Instead of jumping straight into code, it guides you through understanding the codebase, asking clarifying questions, designing architecture, and ensuring quality—resulting in better-designed features that integrate seamlessly with your existing code. ## Philosophy Building features requires more than just writing code. You need to: - **Understand the codebase** before making changes - **Ask questions** to clarify ambiguous requirements - **Design thoughtfully** before implementing - **Review for quality** after building This plugin embeds these practices into a structured workflow that runs automatically when you use the `/feature-dev` command. ## Command: `/feature-dev` Launches a guided feature development workflow with 7 distinct phases. 
**Usage:** ```bash /feature-dev Add user authentication with OAuth ``` Or simply: ```bash /feature-dev ``` The command will guide you through the entire process interactively. ## The 7-Phase Workflow ### Phase 1: Discovery **Goal**: Understand what needs to be built **What happens:** - Clarifies the feature request if it's unclear - Asks what problem you're solving - Identifies constraints and requirements - Summarizes understanding and confirms with you **Example:** ``` You: /feature-dev Add caching Claude: Let me understand what you need... - What should be cached? (API responses, computed values, etc.) - What are your performance requirements? - Do you have a preferred caching solution? ``` ### Phase 2: Codebase Exploration **Goal**: Understand relevant existing code and patterns **What happens:** - Launches 2-3 `code-explorer` agents in parallel - Each agent explores different aspects (similar features, architecture, UI patterns) - Agents return comprehensive analyses with key files to read - Claude reads all identified files to build deep understanding - Presents comprehensive summary of findings **Agents launched:** - "Find features similar to [feature] and trace implementation" - "Map the architecture and abstractions for [area]" - "Analyze current implementation of [related feature]" **Example output:** ``` Found similar features: - User authentication (src/auth/): Uses JWT tokens, middleware pattern - Session management (src/session/): Redis-backed, 24hr expiry - API security (src/api/middleware/): Rate limiting, CORS Key files to understand: - src/auth/AuthService.ts:45 - Core authentication logic - src/middleware/authMiddleware.ts:12 - Request authentication - src/config/security.ts:8 - Security configuration ``` ### Phase 3: Clarifying Questions **Goal**: Fill in gaps and resolve all ambiguities **What happens:** - Reviews codebase findings and feature request - Identifies underspecified aspects: - Edge cases - Error handling - Integration points - 
Backward compatibility - Performance needs - Presents all questions in an organized list - **Waits for your answers before proceeding** **Example:** ``` Before designing the architecture, I need to clarify: 1. OAuth provider: Which OAuth providers? (Google, GitHub, custom?) 2. User data: Store OAuth tokens or just user profile? 3. Existing auth: Replace current auth or add alongside? 4. Sessions: Integrate with existing session management? 5. Error handling: How to handle OAuth failures? ``` **Critical**: This phase ensures nothing is ambiguous before design begins. ### Phase 4: Architecture Design **Goal**: Design multiple implementation approaches **What happens:** - Launches 2-3 `code-architect` agents with different focuses: - **Minimal changes**: Smallest change, maximum reuse - **Clean architecture**: Maintainability, elegant abstractions - **Pragmatic balance**: Speed + quality - Reviews all approaches - Forms opinion on which fits best for this task - Presents comparison with trade-offs and recommendation - **Asks which approach you prefer** **Example output:** ``` I've designed 3 approaches: Approach 1: Minimal Changes - Extend existing AuthService with OAuth methods - Add new OAuth routes to existing auth router - Minimal refactoring required Pros: Fast, low risk Cons: Couples OAuth to existing auth, harder to test Approach 2: Clean Architecture - New OAuthService with dedicated interface - Separate OAuth router and middleware - Refactor AuthService to use common interface Pros: Clean separation, testable, maintainable Cons: More files, more refactoring Approach 3: Pragmatic Balance - New OAuthProvider abstraction - Integrate into existing AuthService - Minimal refactoring, good boundaries Pros: Balanced complexity and cleanliness Cons: Some coupling remains Recommendation: Approach 3 - gives you clean boundaries without excessive refactoring, and fits your existing architecture well. Which approach would you like to use? 
``` ### Phase 5: Implementation **Goal**: Build the feature **What happens:** - **Waits for explicit approval** before starting - Reads all relevant files identified in previous phases - Implements following chosen architecture - Follows codebase conventions strictly - Writes clean, well-documented code - Updates todos as progress is made **Notes:** - Implementation only starts after you approve - Follows patterns discovered in Phase 2 - Uses architecture designed in Phase 4 - Continuously tracks progress ### Phase 6: Quality Review **Goal**: Ensure code is simple, DRY, elegant, and functionally correct **What happens:** - Launches 3 `code-reviewer` agents in parallel with different focuses: - **Simplicity/DRY/Elegance**: Code quality and maintainability - **Bugs/Correctness**: Functional correctness and logic errors - **Conventions/Abstractions**: Project standards and patterns - Consolidates findings - Identifies highest severity issues - **Presents findings and asks what you want to do**: - Fix now - Fix later - Proceed as-is - Addresses issues based on your decision **Example output:** ``` Code Review Results: High Priority Issues: 1. Missing error handling in OAuth callback (src/auth/oauth.ts:67) 2. Memory leak: OAuth state not cleaned up (src/auth/oauth.ts:89) Medium Priority: 1. Could simplify token refresh logic (src/auth/oauth.ts:120) 2. Consider extracting OAuth config validation All tests pass. Code follows project conventions. What would you like to do? 
``` ### Phase 7: Summary **Goal**: Document what was accomplished **What happens:** - Marks all todos complete - Summarizes: - What was built - Key decisions made - Files modified - Suggested next steps **Example:** ``` Feature Complete: OAuth Authentication What was built: - OAuth provider abstraction supporting Google and GitHub - OAuth routes and middleware integrated with existing auth - Token refresh and session integration - Error handling for all OAuth flows Key decisions: - Used pragmatic approach with OAuthProvider abstraction - Integrated with existing session management - Added OAuth state to prevent CSRF Files modified: - src/auth/OAuthProvider.ts (new) - src/auth/AuthService.ts - src/routes/auth.ts - src/middleware/authMiddleware.ts Suggested next steps: - Add tests for OAuth flows - Add more OAuth providers (Microsoft, Apple) - Update documentation ``` ## Agents ### `code-explorer` **Purpose**: Deeply analyzes existing codebase features by tracing execution paths **Focus areas:** - Entry points and call chains - Data flow and transformations - Architecture layers and patterns - Dependencies and integrations - Implementation details **When triggered:** - Automatically in Phase 2 - Can be invoked manually when exploring code **Output:** - Entry points with file:line references - Step-by-step execution flow - Key components and responsibilities - Architecture insights - List of essential files to read ### `code-architect` **Purpose**: Designs feature architectures and implementation blueprints **Focus areas:** - Codebase pattern analysis - Architecture decisions - Component design - Implementation roadmap - Data flow and build sequence **When triggered:** - Automatically in Phase 4 - Can be invoked manually for architecture design **Output:** - Patterns and conventions found - Architecture decision with rationale - Complete component design - Implementation map with specific files - Build sequence with phases ### `code-reviewer` **Purpose**: Reviews code 
for bugs, quality issues, and project conventions **Focus areas:** - Project guideline compliance (CLAUDE.md) - Bug detection - Code quality issues - Confidence-based filtering (only reports high-confidence issues ≥80) **When triggered:** - Automatically in Phase 6 - Can be invoked manually after writing code **Output:** - High-confidence issues (confidence ≥ 80), grouped by severity (Critical vs Important) - Specific fixes with file:line references - Project guideline references ## Usage Patterns ### Full workflow (recommended for new features): ```bash /feature-dev Add rate limiting to API endpoints ``` Let the workflow guide you through all 7 phases. ### Manual agent invocation: **Explore a feature:** ``` "Launch code-explorer to trace how authentication works" ``` **Design architecture:** ``` "Launch code-architect to design the caching layer" ``` **Review code:** ``` "Launch code-reviewer to check my recent changes" ``` ## Best Practices 1. **Use the full workflow for complex features**: The 7 phases ensure thorough planning 2. **Answer clarifying questions thoughtfully**: Phase 3 prevents future confusion 3. **Choose architecture deliberately**: Phase 4 gives you options for a reason 4. **Don't skip code review**: Phase 6 catches issues before they reach production 5. 
**Read the suggested files**: Phase 2 identifies key files—read them to understand context ## When to Use This Plugin **Use for:** - New features that touch multiple files - Features requiring architectural decisions - Complex integrations with existing code - Features where requirements are somewhat unclear **Don't use for:** - Single-line bug fixes - Trivial changes - Well-defined, simple tasks - Urgent hotfixes ## Requirements - Claude Code installed - Git repository (for code review) - Project with existing codebase (workflow assumes existing code to learn from) ## Troubleshooting ### Agents take too long **Issue**: Code exploration or architecture agents are slow **Solution**: - This is normal for large codebases - Agents run in parallel when possible - The thoroughness pays off in better understanding ### Too many clarifying questions **Issue**: Phase 3 asks too many questions **Solution**: - Be more specific in your initial feature request - Provide context about constraints upfront - Say "whatever you think is best" if truly no preference ### Architecture options overwhelming **Issue**: Too many architecture options in Phase 4 **Solution**: - Trust the recommendation—it's based on codebase analysis - If still unsure, ask for more explanation - Pick the pragmatic option when in doubt ## Tips - **Be specific in your feature request**: More detail = fewer clarifying questions - **Trust the process**: Each phase builds on the previous one - **Review agent outputs**: Agents provide valuable insights about your codebase - **Don't skip phases**: Each phase serves a purpose - **Use for learning**: The exploration phase teaches you about your own codebase ## Author Sid Bidasaria (sbidasaria@anthropic.com) ## Version 1.0.0 ================================================ FILE: plugins/feature-dev/agents/code-architect.md ================================================ --- name: code-architect description: Designs feature architectures by analyzing existing codebase 
patterns and conventions, then providing comprehensive implementation blueprints with specific files to create/modify, component designs, data flows, and build sequences tools: Glob, Grep, LS, Read, NotebookRead, WebFetch, TodoWrite, WebSearch, KillShell, BashOutput model: sonnet color: green --- You are a senior software architect who delivers comprehensive, actionable architecture blueprints by deeply understanding codebases and making confident architectural decisions. ## Core Process **1. Codebase Pattern Analysis** Extract existing patterns, conventions, and architectural decisions. Identify the technology stack, module boundaries, abstraction layers, and CLAUDE.md guidelines. Find similar features to understand established approaches. **2. Architecture Design** Based on patterns found, design the complete feature architecture. Make decisive choices - pick one approach and commit. Ensure seamless integration with existing code. Design for testability, performance, and maintainability. **3. Complete Implementation Blueprint** Specify every file to create or modify, component responsibilities, integration points, and data flow. Break implementation into clear phases with specific tasks. ## Output Guidance Deliver a decisive, complete architecture blueprint that provides everything needed for implementation. 
Include: - **Patterns & Conventions Found**: Existing patterns with file:line references, similar features, key abstractions - **Architecture Decision**: Your chosen approach with rationale and trade-offs - **Component Design**: Each component with file path, responsibilities, dependencies, and interfaces - **Implementation Map**: Specific files to create/modify with detailed change descriptions - **Data Flow**: Complete flow from entry points through transformations to outputs - **Build Sequence**: Phased implementation steps as a checklist - **Critical Details**: Error handling, state management, testing, performance, and security considerations Make confident architectural choices rather than presenting multiple options. Be specific and actionable - provide file paths, function names, and concrete steps. ================================================ FILE: plugins/feature-dev/agents/code-explorer.md ================================================ --- name: code-explorer description: Deeply analyzes existing codebase features by tracing execution paths, mapping architecture layers, understanding patterns and abstractions, and documenting dependencies to inform new development tools: Glob, Grep, LS, Read, NotebookRead, WebFetch, TodoWrite, WebSearch, KillShell, BashOutput model: sonnet color: yellow --- You are an expert code analyst specializing in tracing and understanding feature implementations across codebases. ## Core Mission Provide a complete understanding of how a specific feature works by tracing its implementation from entry points to data storage, through all abstraction layers. ## Analysis Approach **1. Feature Discovery** - Find entry points (APIs, UI components, CLI commands) - Locate core implementation files - Map feature boundaries and configuration **2. 
Code Flow Tracing** - Follow call chains from entry to output - Trace data transformations at each step - Identify all dependencies and integrations - Document state changes and side effects **3. Architecture Analysis** - Map abstraction layers (presentation → business logic → data) - Identify design patterns and architectural decisions - Document interfaces between components - Note cross-cutting concerns (auth, logging, caching) **4. Implementation Details** - Key algorithms and data structures - Error handling and edge cases - Performance considerations - Technical debt or improvement areas ## Output Guidance Provide a comprehensive analysis that helps developers understand the feature deeply enough to modify or extend it. Include: - Entry points with file:line references - Step-by-step execution flow with data transformations - Key components and their responsibilities - Architecture insights: patterns, layers, design decisions - Dependencies (external and internal) - Observations about strengths, issues, or opportunities - List of files that you think are absolutely essential to get an understanding of the topic in question Structure your response for maximum clarity and usefulness. Always include specific file paths and line numbers. ================================================ FILE: plugins/feature-dev/agents/code-reviewer.md ================================================ --- name: code-reviewer description: Reviews code for bugs, logic errors, security vulnerabilities, code quality issues, and adherence to project conventions, using confidence-based filtering to report only high-priority issues that truly matter tools: Glob, Grep, LS, Read, NotebookRead, WebFetch, TodoWrite, WebSearch, KillShell, BashOutput model: sonnet color: red --- You are an expert code reviewer specializing in modern software development across multiple languages and frameworks. 
Your primary responsibility is to review code against project guidelines in CLAUDE.md with high precision to minimize false positives. ## Review Scope By default, review unstaged changes from `git diff`. The user may specify different files or scope to review. ## Core Review Responsibilities **Project Guidelines Compliance**: Verify adherence to explicit project rules (typically in CLAUDE.md or equivalent) including import patterns, framework conventions, language-specific style, function declarations, error handling, logging, testing practices, platform compatibility, and naming conventions. **Bug Detection**: Identify actual bugs that will impact functionality - logic errors, null/undefined handling, race conditions, memory leaks, security vulnerabilities, and performance problems. **Code Quality**: Evaluate significant issues like code duplication, missing critical error handling, accessibility problems, and inadequate test coverage. ## Confidence Scoring Rate each potential issue on a scale from 0-100: - **0**: Not confident at all. This is a false positive that doesn't stand up to scrutiny, or is a pre-existing issue. - **25**: Somewhat confident. This might be a real issue, but may also be a false positive. If stylistic, it wasn't explicitly called out in project guidelines. - **50**: Moderately confident. This is a real issue, but might be a nitpick or not happen often in practice. Not very important relative to the rest of the changes. - **75**: Highly confident. Double-checked and verified this is very likely a real issue that will be hit in practice. The existing approach is insufficient. Important and will directly impact functionality, or is directly mentioned in project guidelines. - **100**: Absolutely certain. Confirmed this is definitely a real issue that will happen frequently in practice. The evidence directly confirms this. **Only report issues with confidence ≥ 80.** Focus on issues that truly matter - quality over quantity. 
## Output Guidance Start by clearly stating what you're reviewing. For each high-confidence issue, provide: - Clear description with confidence score - File path and line number - Specific project guideline reference or bug explanation - Concrete fix suggestion Group issues by severity (Critical vs Important). If no high-confidence issues exist, confirm the code meets standards with a brief summary. Structure your response for maximum actionability - developers should know exactly what to fix and why. ================================================ FILE: plugins/feature-dev/commands/feature-dev.md ================================================ --- description: Guided feature development with codebase understanding and architecture focus argument-hint: Optional feature description --- # Feature Development You are helping a developer implement a new feature. Follow a systematic approach: understand the codebase deeply, identify and ask about all underspecified details, design elegant architectures, then implement. ## Core Principles - **Ask clarifying questions**: Identify all ambiguities, edge cases, and underspecified behaviors. Ask specific, concrete questions rather than making assumptions. Wait for user answers before proceeding with implementation. Ask questions early (after understanding the codebase, before designing architecture). - **Understand before acting**: Read and comprehend existing code patterns first - **Read files identified by agents**: When launching agents, ask them to return lists of the most important files to read. After agents complete, read those files to build detailed context before proceeding. - **Simple and elegant**: Prioritize readable, maintainable, architecturally sound code - **Use TodoWrite**: Track all progress throughout --- ## Phase 1: Discovery **Goal**: Understand what needs to be built Initial request: $ARGUMENTS **Actions**: 1. Create todo list with all phases 2. 
If feature unclear, ask user for: - What problem are they solving? - What should the feature do? - Any constraints or requirements? 3. Summarize understanding and confirm with user --- ## Phase 2: Codebase Exploration **Goal**: Understand relevant existing code and patterns at both high and low levels **Actions**: 1. Launch 2-3 code-explorer agents in parallel. Each agent should: - Trace through the code comprehensively and focus on getting a comprehensive understanding of abstractions, architecture and flow of control - Target a different aspect of the codebase (e.g. similar features, high level understanding, architectural understanding, user experience, etc.) - Include a list of 5-10 key files to read **Example agent prompts**: - "Find features similar to [feature] and trace through their implementation comprehensively" - "Map the architecture and abstractions for [feature area], tracing through the code comprehensively" - "Analyze the current implementation of [existing feature/area], tracing through the code comprehensively" - "Identify UI patterns, testing approaches, or extension points relevant to [feature]" 2. Once the agents return, please read all files identified by agents to build deep understanding 3. Present comprehensive summary of findings and patterns discovered --- ## Phase 3: Clarifying Questions **Goal**: Fill in gaps and resolve all ambiguities before designing **CRITICAL**: This is one of the most important phases. DO NOT SKIP. **Actions**: 1. Review the codebase findings and original feature request 2. Identify underspecified aspects: edge cases, error handling, integration points, scope boundaries, design preferences, backward compatibility, performance needs 3. **Present all questions to the user in a clear, organized list** 4. **Wait for answers before proceeding to architecture design** If the user says "whatever you think is best", provide your recommendation and get explicit confirmation. 
--- ## Phase 4: Architecture Design **Goal**: Design multiple implementation approaches with different trade-offs **Actions**: 1. Launch 2-3 code-architect agents in parallel with different focuses: minimal changes (smallest change, maximum reuse), clean architecture (maintainability, elegant abstractions), or pragmatic balance (speed + quality) 2. Review all approaches and form your opinion on which fits best for this specific task (consider: small fix vs large feature, urgency, complexity, team context) 3. Present to user: brief summary of each approach, trade-offs comparison, **your recommendation with reasoning**, concrete implementation differences 4. **Ask user which approach they prefer** --- ## Phase 5: Implementation **Goal**: Build the feature **DO NOT START WITHOUT USER APPROVAL** **Actions**: 1. Wait for explicit user approval 2. Read all relevant files identified in previous phases 3. Implement following chosen architecture 4. Follow codebase conventions strictly 5. Write clean, well-documented code 6. Update todos as you progress --- ## Phase 6: Quality Review **Goal**: Ensure code is simple, DRY, elegant, easy to read, and functionally correct **Actions**: 1. Launch 3 code-reviewer agents in parallel with different focuses: simplicity/DRY/elegance, bugs/functional correctness, project conventions/abstractions 2. Consolidate findings and identify highest severity issues that you recommend fixing 3. **Present findings to user and ask what they want to do** (fix now, fix later, or proceed as-is) 4. Address issues based on user decision --- ## Phase 7: Summary **Goal**: Document what was accomplished **Actions**: 1. Mark all todos complete 2. 
Summarize: - What was built - Key decisions made - Files modified - Suggested next steps --- ================================================ FILE: plugins/frontend-design/.claude-plugin/plugin.json ================================================ { "name": "frontend-design", "version": "1.0.0", "description": "Frontend design skill for UI/UX implementation", "author": { "name": "Prithvi Rajasekaran, Alexander Bricken", "email": "prithvi@anthropic.com, alexander@anthropic.com" } } ================================================ FILE: plugins/frontend-design/README.md ================================================ # Frontend Design Plugin Generates distinctive, production-grade frontend interfaces that avoid generic AI aesthetics. ## What It Does Claude automatically uses this skill for frontend work. Creates production-ready code with: - Bold aesthetic choices - Distinctive typography and color palettes - High-impact animations and visual details - Context-aware implementation ## Usage ``` "Create a dashboard for a music streaming app" "Build a landing page for an AI security startup" "Design a settings panel with dark mode" ``` Claude will choose a clear aesthetic direction and implement production code with meticulous attention to detail. ## Learn More See the [Frontend Aesthetics Cookbook](https://github.com/anthropics/claude-cookbooks/blob/main/coding/prompting_for_frontend_aesthetics.ipynb) for detailed guidance on prompting for high-quality frontend design. ## Authors Prithvi Rajasekaran (prithvi@anthropic.com) Alexander Bricken (alexander@anthropic.com) ================================================ FILE: plugins/frontend-design/skills/frontend-design/SKILL.md ================================================ --- name: frontend-design description: Create distinctive, production-grade frontend interfaces with high design quality. Use this skill when the user asks to build web components, pages, or applications. 
Generates creative, polished code that avoids generic AI aesthetics. license: Complete terms in LICENSE.txt --- This skill guides creation of distinctive, production-grade frontend interfaces that avoid generic "AI slop" aesthetics. Implement real working code with exceptional attention to aesthetic details and creative choices. The user provides frontend requirements: a component, page, application, or interface to build. They may include context about the purpose, audience, or technical constraints. ## Design Thinking Before coding, understand the context and commit to a BOLD aesthetic direction: - **Purpose**: What problem does this interface solve? Who uses it? - **Tone**: Pick an extreme: brutally minimal, maximalist chaos, retro-futuristic, organic/natural, luxury/refined, playful/toy-like, editorial/magazine, brutalist/raw, art deco/geometric, soft/pastel, industrial/utilitarian, etc. There are so many flavors to choose from. Use these for inspiration but design one that is true to the aesthetic direction. - **Constraints**: Technical requirements (framework, performance, accessibility). - **Differentiation**: What makes this UNFORGETTABLE? What's the one thing someone will remember? **CRITICAL**: Choose a clear conceptual direction and execute it with precision. Bold maximalism and refined minimalism both work - the key is intentionality, not intensity. Then implement working code (HTML/CSS/JS, React, Vue, etc.) that is: - Production-grade and functional - Visually striking and memorable - Cohesive with a clear aesthetic point-of-view - Meticulously refined in every detail ## Frontend Aesthetics Guidelines Focus on: - **Typography**: Choose fonts that are beautiful, unique, and interesting. Avoid generic fonts like Arial and Inter; opt instead for distinctive choices that elevate the frontend's aesthetics; unexpected, characterful font choices. Pair a distinctive display font with a refined body font. - **Color & Theme**: Commit to a cohesive aesthetic. 
Use CSS variables for consistency. Dominant colors with sharp accents outperform timid, evenly-distributed palettes. - **Motion**: Use animations for effects and micro-interactions. Prioritize CSS-only solutions for HTML. Use Motion library for React when available. Focus on high-impact moments: one well-orchestrated page load with staggered reveals (animation-delay) creates more delight than scattered micro-interactions. Use scroll-triggering and hover states that surprise. - **Spatial Composition**: Unexpected layouts. Asymmetry. Overlap. Diagonal flow. Grid-breaking elements. Generous negative space OR controlled density. - **Backgrounds & Visual Details**: Create atmosphere and depth rather than defaulting to solid colors. Add contextual effects and textures that match the overall aesthetic. Apply creative forms like gradient meshes, noise textures, geometric patterns, layered transparencies, dramatic shadows, decorative borders, custom cursors, and grain overlays. NEVER use generic AI-generated aesthetics like overused font families (Inter, Roboto, Arial, system fonts), cliched color schemes (particularly purple gradients on white backgrounds), predictable layouts and component patterns, and cookie-cutter design that lacks context-specific character. Interpret creatively and make unexpected choices that feel genuinely designed for the context. No design should be the same. Vary between light and dark themes, different fonts, different aesthetics. NEVER converge on common choices (Space Grotesk, for example) across generations. **IMPORTANT**: Match implementation complexity to the aesthetic vision. Maximalist designs need elaborate code with extensive animations and effects. Minimalist or refined designs need restraint, precision, and careful attention to spacing, typography, and subtle details. Elegance comes from executing the vision well. Remember: Claude is capable of extraordinary creative work. 
Don't hold back, show what can truly be created when thinking outside the box and committing fully to a distinctive vision. ================================================ FILE: plugins/hookify/.claude-plugin/plugin.json ================================================ { "name": "hookify", "version": "0.1.0", "description": "Easily create hooks to prevent unwanted behaviors by analyzing conversation patterns", "author": { "name": "Daisy Hollman", "email": "daisy@anthropic.com" } } ================================================ FILE: plugins/hookify/.gitignore ================================================ # Python __pycache__/ *.py[cod] *$py.class *.so .Python # Virtual environments venv/ env/ ENV/ # IDE .vscode/ .idea/ *.swp *.swo # OS .DS_Store Thumbs.db # Testing .pytest_cache/ .coverage htmlcov/ # Local configuration (should not be committed) .claude/*.local.md .claude/*.local.json ================================================ FILE: plugins/hookify/README.md ================================================ # Hookify Plugin Easily create custom hooks to prevent unwanted behaviors by analyzing conversation patterns or from explicit instructions. ## Overview The hookify plugin makes it simple to create hooks without editing complex `hooks.json` files. Instead, you create lightweight markdown configuration files that define patterns to watch for and messages to show when those patterns match. **Key features:** - 🎯 Analyze conversations to find unwanted behaviors automatically - 📝 Simple markdown configuration files with YAML frontmatter - 🔍 Regex pattern matching for powerful rules - 🚀 No coding required - just describe the behavior - 🔄 Easy enable/disable without restarting ## Quick Start ### 1. Create Your First Rule ```bash /hookify Warn me when I use rm -rf commands ``` This analyzes your request and creates `.claude/hookify.warn-rm.local.md`. ### 2. Test It Immediately **No restart needed!** Rules take effect on the very next tool use. 
Ask Claude to run a command that should trigger the rule: ``` Run rm -rf /tmp/test ``` You should see the warning message immediately! ## Usage ### Main Command: /hookify **With arguments:** ``` /hookify Don't use console.log in TypeScript files ``` Creates a rule from your explicit instructions. **Without arguments:** ``` /hookify ``` Analyzes recent conversation to find behaviors you've corrected or been frustrated by. ### Helper Commands **List all rules:** ``` /hookify:list ``` **Configure rules interactively:** ``` /hookify:configure ``` Enable/disable existing rules through an interactive interface. **Get help:** ``` /hookify:help ``` ## Rule Configuration Format ### Simple Rule (Single Pattern) `.claude/hookify.dangerous-rm.local.md`: ```markdown --- name: block-dangerous-rm enabled: true event: bash pattern: rm\s+-rf action: block --- ⚠️ **Dangerous rm command detected!** This command could delete important files. Please: - Verify the path is correct - Consider using a safer approach - Make sure you have backups ``` **Action field:** - `warn`: Shows warning but allows operation (default) - `block`: Prevents operation from executing (PreToolUse) or stops session (Stop events) ### Advanced Rule (Multiple Conditions) `.claude/hookify.sensitive-files.local.md`: ```markdown --- name: warn-sensitive-files enabled: true event: file action: warn conditions: - field: file_path operator: regex_match pattern: \.env$|credentials|secrets - field: new_text operator: contains pattern: KEY --- 🔐 **Sensitive file edit detected!** Ensure credentials are not hardcoded and file is in .gitignore. ``` **All conditions must match** for the rule to trigger. 
## Event Types - **`bash`**: Triggers on Bash tool commands - **`file`**: Triggers on Edit, Write, MultiEdit tools - **`stop`**: Triggers when Claude wants to stop (for completion checks) - **`prompt`**: Triggers on user prompt submission - **`all`**: Triggers on all events ## Pattern Syntax Use Python regex syntax: | Pattern | Matches | Example | |---------|---------|---------| | `rm\s+-rf` | rm -rf | rm -rf /tmp | | `console\.log\(` | console.log( | console.log("test") | | `(eval\|exec)\(` | eval( or exec( | eval("code") | | `\.env$` | files ending in .env | .env, .env.local | | `chmod\s+777` | chmod 777 | chmod 777 file.txt | **Tips:** - Use `\s` for whitespace - Escape special chars: `\.` for literal dot - Use `|` for OR: `(foo|bar)` - Use `.*` to match anything - Set `action: block` for dangerous operations - Set `action: warn` (or omit) for informational warnings ## Examples ### Example 1: Block Dangerous Commands ```markdown --- name: block-destructive-ops enabled: true event: bash pattern: rm\s+-rf|dd\s+if=|mkfs|format action: block --- 🛑 **Destructive operation detected!** This command can cause data loss. Operation blocked for safety. Please verify the exact path and use a safer approach. ``` **This rule blocks the operation** - Claude will not be allowed to execute these commands. ### Example 2: Warn About Debug Code ```markdown --- name: warn-debug-code enabled: true event: file pattern: console\.log\(|debugger;|print\( action: warn --- 🐛 **Debug code detected** Remember to remove debugging statements before committing. ``` **This rule warns but allows** - Claude sees the message but can still proceed. ### Example 3: Require Tests Before Stopping ```markdown --- name: require-tests-run enabled: false event: stop action: block conditions: - field: transcript operator: not_contains pattern: npm test|pytest|cargo test --- **Tests not detected in transcript!** Before stopping, please run tests to verify your changes work correctly. 
``` **This blocks Claude from stopping** if no test commands appear in the session transcript. Enable only when you want strict enforcement. ## Advanced Usage ### Multiple Conditions Check multiple fields simultaneously: ```markdown --- name: api-key-in-typescript enabled: true event: file conditions: - field: file_path operator: regex_match pattern: \.tsx?$ - field: new_text operator: regex_match pattern: (API_KEY|SECRET|TOKEN)\s*=\s*["'] --- 🔐 **Hardcoded credential in TypeScript!** Use environment variables instead of hardcoded values. ``` ### Operators Reference - `regex_match`: Pattern must match (most common) - `contains`: String must contain pattern - `equals`: Exact string match - `not_contains`: String must NOT contain pattern - `starts_with`: String starts with pattern - `ends_with`: String ends with pattern ### Field Reference **For bash events:** - `command`: The bash command string **For file events:** - `file_path`: Path to file being edited - `new_text`: New content being added (Edit, Write) - `old_text`: Old content being replaced (Edit only) - `content`: File content (Write only) **For prompt events:** - `user_prompt`: The user's submitted prompt text **For stop events:** - Use general matching on session state ## Management ### Enable/Disable Rules **Temporarily disable:** Edit the `.local.md` file and set `enabled: false` **Re-enable:** Set `enabled: true` **Or use interactive tool:** ``` /hookify:configure ``` ### Delete Rules Simply delete the `.local.md` file: ```bash rm .claude/hookify.my-rule.local.md ``` ### View All Rules ``` /hookify:list ``` ## Installation This plugin is part of the Claude Code Marketplace. It should be auto-discovered when the marketplace is installed. **Manual testing:** ```bash cc --plugin-dir /path/to/hookify ``` ## Requirements - Python 3.7+ - No external dependencies (uses stdlib only) ## Troubleshooting **Rule not triggering:** 1. 
Check rule file exists in `.claude/` directory (in project root, not plugin directory) 2. Verify `enabled: true` in frontmatter 3. Test regex pattern separately 4. Rules should work immediately - no restart needed 5. Try `/hookify:list` to see if rule is loaded **Import errors:** - Ensure Python 3 is available: `python3 --version` - Check hookify plugin is installed **Pattern not matching:** - Test regex: `python3 -c "import re; print(re.search(r'pattern', 'text'))"` - Use unquoted patterns in YAML to avoid escaping issues - Start simple, then add complexity **Hook seems slow:** - Keep patterns simple (avoid complex regex) - Use specific event types (bash, file) instead of "all" - Limit number of active rules ## Contributing Found a useful rule pattern? Consider sharing example files via PR! ## Future Enhancements - Severity levels (error/warning/info distinctions) - Rule templates library - Interactive pattern builder - Hook testing utilities - JSON format support (in addition to markdown) ## License MIT License ================================================ FILE: plugins/hookify/agents/conversation-analyzer.md ================================================ --- name: conversation-analyzer description: Use this agent when analyzing conversation transcripts to find behaviors worth preventing with hooks. Examples: Context: User is running /hookify command without arguments\nuser: "/hookify"\nassistant: "I'll analyze the conversation to find behaviors you want to prevent"\nThe /hookify command without arguments triggers conversation analysis to find unwanted behaviors.Context: User wants to create hooks from recent frustrations\nuser: "Can you look back at this conversation and help me create hooks for the mistakes you made?"\nassistant: "I'll use the conversation-analyzer agent to identify the issues and suggest hooks."\nUser explicitly asks to analyze conversation for mistakes that should be prevented. 
model: inherit color: yellow tools: ["Read", "Grep"] --- You are a conversation analysis specialist that identifies problematic behaviors in Claude Code sessions that could be prevented with hooks. **Your Core Responsibilities:** 1. Read and analyze user messages to find frustration signals 2. Identify specific tool usage patterns that caused issues 3. Extract actionable patterns that can be matched with regex 4. Categorize issues by severity and type 5. Provide structured findings for hook rule generation **Analysis Process:** ### 1. Search for User Messages Indicating Issues Read through user messages in reverse chronological order (most recent first). Look for: **Explicit correction requests:** - "Don't use X" - "Stop doing Y" - "Please don't Z" - "Avoid..." - "Never..." **Frustrated reactions:** - "Why did you do X?" - "I didn't ask for that" - "That's not what I meant" - "That was wrong" **Corrections and reversions:** - User reverting changes Claude made - User fixing issues Claude created - User providing step-by-step corrections **Repeated issues:** - Same type of mistake multiple times - User having to remind multiple times - Pattern of similar problems ### 2. Identify Tool Usage Patterns For each issue, determine: - **Which tool**: Bash, Edit, Write, MultiEdit - **What action**: Specific command or code pattern - **When it happened**: During what task/phase - **Why problematic**: User's stated reason or implicit concern **Extract concrete examples:** - For Bash: Actual command that was problematic - For Edit/Write: Code pattern that was added - For Stop: What was missing before stopping ### 3. 
Create Regex Patterns Convert behaviors into matchable patterns: **Bash command patterns:** - `rm\s+-rf` for dangerous deletes - `sudo\s+` for privilege escalation - `chmod\s+777` for permission issues **Code patterns (Edit/Write):** - `console\.log\(` for debug logging - `eval\(|new Function\(` for dangerous eval - `innerHTML\s*=` for XSS risks **File path patterns:** - `\.env$` for environment files - `/node_modules/` for dependency files - `dist/|build/` for generated files ### 4. Categorize Severity **High severity (should block in future):** - Dangerous commands (rm -rf, chmod 777) - Security issues (hardcoded secrets, eval) - Data loss risks **Medium severity (warn):** - Style violations (console.log in production) - Wrong file types (editing generated files) - Missing best practices **Low severity (optional):** - Preferences (coding style) - Non-critical patterns ### 5. Output Format Return your findings as structured text in this format: ``` ## Hookify Analysis Results ### Issue 1: Dangerous rm Commands **Severity**: High **Tool**: Bash **Pattern**: `rm\s+-rf` **Occurrences**: 3 times **Context**: Used rm -rf on /tmp directories without verification **User Reaction**: "Please be more careful with rm commands" **Suggested Rule:** - Name: warn-dangerous-rm - Event: bash - Pattern: rm\s+-rf - Message: "Dangerous rm command detected. Verify path before proceeding." --- ### Issue 2: Console.log in TypeScript **Severity**: Medium **Tool**: Edit/Write **Pattern**: `console\.log\(` **Occurrences**: 2 times **Context**: Added console.log statements to production TypeScript files **User Reaction**: "Don't use console.log in production code" **Suggested Rule:** - Name: warn-console-log - Event: file - Pattern: console\.log\( - Message: "Console.log detected. Use proper logging library instead." --- [Continue for each issue found...] 
## Summary Found {N} behaviors worth preventing: - {N} high severity - {N} medium severity - {N} low severity Recommend creating rules for high and medium severity issues. ``` **Quality Standards:** - Be specific about patterns (don't be overly broad) - Include actual examples from conversation - Explain why each issue matters - Provide ready-to-use regex patterns - Don't false-positive on discussions about what NOT to do **Edge Cases:** **User discussing hypotheticals:** - "What would happen if I used rm -rf?" - Don't treat as problematic behavior **Teaching moments:** - "Here's what you shouldn't do: ..." - Context indicates explanation, not actual problem **One-time accidents:** - Single occurrence, already fixed - Mention but mark as low priority **Subjective preferences:** - "I prefer X over Y" - Mark as low severity, let user decide **Return Results:** Provide your analysis in the structured format above. The /hookify command will use this to: 1. Present findings to user 2. Ask which rules to create 3. Generate .local.md configuration files 4. Save rules to .claude directory ================================================ FILE: plugins/hookify/commands/configure.md ================================================ --- description: Enable or disable hookify rules interactively allowed-tools: ["Glob", "Read", "Edit", "AskUserQuestion", "Skill"] --- # Configure Hookify Rules **Load hookify:writing-rules skill first** to understand rule format. Enable or disable existing hookify rules using an interactive interface. ## Steps ### 1. Find Existing Rules Use Glob tool to find all hookify rule files: ``` pattern: ".claude/hookify.*.local.md" ``` If no rules found, inform user: ``` No hookify rules configured yet. Use `/hookify` to create your first rule. ``` ### 2. Read Current State For each rule file: - Read the file - Extract `name` and `enabled` fields from frontmatter - Build list of rules with current state ### 3. 
Ask User Which Rules to Toggle Use AskUserQuestion to let user select rules: ```json { "questions": [ { "question": "Which rules would you like to enable or disable?", "header": "Configure", "multiSelect": true, "options": [ { "label": "warn-dangerous-rm (currently enabled)", "description": "Warns about rm -rf commands" }, { "label": "warn-console-log (currently disabled)", "description": "Warns about console.log in code" }, { "label": "require-tests (currently enabled)", "description": "Requires tests before stopping" } ] } ] } ``` **Option format:** - Label: `{rule-name} (currently {enabled|disabled})` - Description: Brief description from rule's message or pattern ### 4. Parse User Selection For each selected rule: - Determine current state from label (enabled/disabled) - Toggle state: enabled → disabled, disabled → enabled ### 5. Update Rule Files For each rule to toggle: - Use Read tool to read current content - Use Edit tool to change `enabled: true` to `enabled: false` (or vice versa) - Handle both with and without quotes **Edit pattern for enabling:** ``` old_string: "enabled: false" new_string: "enabled: true" ``` **Edit pattern for disabling:** ``` old_string: "enabled: true" new_string: "enabled: false" ``` ### 6. 
Confirm Changes Show user what was changed: ``` ## Hookify Rules Updated **Enabled:** - warn-console-log **Disabled:** - warn-dangerous-rm **Unchanged:** - require-tests Changes apply immediately - no restart needed ``` ## Important Notes - Changes take effect immediately on next tool use - You can also manually edit .claude/hookify.*.local.md files - To permanently remove a rule, delete its .local.md file - Use `/hookify:list` to see all configured rules ## Edge Cases **No rules to configure:** - Show message about using `/hookify` to create rules first **User selects no rules:** - Inform that no changes were made **File read/write errors:** - Inform user of specific error - Suggest manual editing as fallback ================================================ FILE: plugins/hookify/commands/help.md ================================================ --- description: Get help with the hookify plugin allowed-tools: ["Read"] --- # Hookify Plugin Help Explain how the hookify plugin works and how to use it. ## Overview The hookify plugin makes it easy to create custom hooks that prevent unwanted behaviors. Instead of editing `hooks.json` files, users create simple markdown configuration files that define patterns to watch for. ## How It Works ### 1. Hook System Hookify installs generic hooks that run on these events: - **PreToolUse**: Before any tool executes (Bash, Edit, Write, etc.) - **PostToolUse**: After a tool executes - **Stop**: When Claude wants to stop working - **UserPromptSubmit**: When user submits a prompt These hooks read configuration files from `.claude/hookify.*.local.md` and check if any rules match the current operation. ### 2. Configuration Files Users create rules in `.claude/hookify.{rule-name}.local.md` files: ```markdown --- name: warn-dangerous-rm enabled: true event: bash pattern: rm\s+-rf --- ⚠️ **Dangerous rm command detected!** This command could delete important files. Please verify the path. 
``` **Key fields:** - `name`: Unique identifier for the rule - `enabled`: true/false to activate/deactivate - `event`: bash, file, stop, prompt, or all - `pattern`: Regex pattern to match The message body is what Claude sees when the rule triggers. ### 3. Creating Rules **Option A: Use /hookify command** ``` /hookify Don't use console.log in production files ``` This analyzes your request and creates the appropriate rule file. **Option B: Create manually** Create `.claude/hookify.my-rule.local.md` with the format above. **Option C: Analyze conversation** ``` /hookify ``` Without arguments, hookify analyzes recent conversation to find behaviors you want to prevent. ## Available Commands - **`/hookify`** - Create hooks from conversation analysis or explicit instructions - **`/hookify:help`** - Show this help (what you're reading now) - **`/hookify:list`** - List all configured hooks - **`/hookify:configure`** - Enable/disable existing hooks interactively ## Example Use Cases **Prevent dangerous commands:** ```markdown --- name: block-chmod-777 enabled: true event: bash pattern: chmod\s+777 --- Don't use chmod 777 - it's a security risk. Use specific permissions instead. ``` **Warn about debugging code:** ```markdown --- name: warn-console-log enabled: true event: file pattern: console\.log\( --- Console.log detected. Remember to remove debug logging before committing. ``` **Require tests before stopping:** ```markdown --- name: require-tests enabled: true event: stop pattern: .* --- Did you run tests before finishing? Make sure `npm test` or equivalent was executed. 
```

## Pattern Syntax

Use Python regex syntax:
- `\s` - whitespace
- `\.` - literal dot
- `|` - OR
- `+` - one or more
- `*` - zero or more
- `\d` - digit
- `[abc]` - character class

**Examples:**
- `rm\s+-rf` - matches "rm -rf"
- `console\.log\(` - matches "console.log("
- `(eval|exec)\(` - matches "eval(" or "exec("
- `\.env$` - matches files ending in .env

## Important Notes

**No Restart Needed**: Hookify rules (`.local.md` files) take effect immediately on the next tool use. The hookify hooks are already loaded and read your rules dynamically.

**Block or Warn**: Rules can either `block` operations (prevent execution) or `warn` (show message but allow). Set `action: block` or `action: warn` in the rule's frontmatter.

**Rule Files**: Keep rules in `.claude/hookify.*.local.md` - they should be git-ignored (add to .gitignore if needed).

**Disable Rules**: Set `enabled: false` in frontmatter or delete the file.

## Troubleshooting

**Hook not triggering:**
- Check rule file is in `.claude/` directory
- Verify `enabled: true` in frontmatter
- Confirm pattern is valid regex
- Test pattern: `python3 -c "import re; print(re.search('your_pattern', 'test_text'))"`
- Rules take effect immediately - no restart needed

**Import errors:**
- Check Python 3 is available: `python3 --version`
- Verify hookify plugin is installed correctly

**Pattern not matching:**
- Test regex separately
- Check for escaping issues (use unquoted patterns in YAML)
- Try simpler pattern first, then refine

## Getting Started

1. Create your first rule:
   ```
   /hookify Warn me when I try to use rm -rf
   ```

2. Try to trigger it:
   - Ask Claude to run `rm -rf /tmp/test`
   - You should see the warning

3. Refine the rule by editing `.claude/hookify.warn-rm.local.md`

4. Create more rules as you encounter unwanted behaviors

For more examples, check the `${CLAUDE_PLUGIN_ROOT}/examples/` directory.
================================================ FILE: plugins/hookify/commands/hookify.md ================================================ --- description: Create hooks to prevent unwanted behaviors from conversation analysis or explicit instructions argument-hint: Optional specific behavior to address allowed-tools: ["Read", "Write", "AskUserQuestion", "Task", "Grep", "TodoWrite", "Skill"] --- # Hookify - Create Hooks from Unwanted Behaviors **FIRST: Load the hookify:writing-rules skill** using the Skill tool to understand rule file format and syntax. Create hook rules to prevent problematic behaviors by analyzing the conversation or from explicit user instructions. ## Your Task You will help the user create hookify rules to prevent unwanted behaviors. Follow these steps: ### Step 1: Gather Behavior Information **If $ARGUMENTS is provided:** - User has given specific instructions: `$ARGUMENTS` - Still analyze recent conversation (last 10-15 user messages) for additional context - Look for examples of the behavior happening **If $ARGUMENTS is empty:** - Launch the conversation-analyzer agent to find problematic behaviors - Agent will scan user prompts for frustration signals - Agent will return structured findings **To analyze conversation:** Use the Task tool to launch conversation-analyzer agent: ``` { "subagent_type": "general-purpose", "description": "Analyze conversation for unwanted behaviors", "prompt": "You are analyzing a Claude Code conversation to find behaviors the user wants to prevent. Read user messages in the current conversation and identify: 1. Explicit requests to avoid something (\"don't do X\", \"stop doing Y\") 2. Corrections or reversions (user fixing Claude's actions) 3. Frustrated reactions (\"why did you do X?\", \"I didn't ask for that\") 4. Repeated issues (same problem multiple times) For each issue found, extract: - What tool was used (Bash, Edit, Write, etc.) 
- Specific pattern or command - Why it was problematic - User's stated reason Return findings as a structured list with: - category: Type of issue - tool: Which tool was involved - pattern: Regex or literal pattern to match - context: What happened - severity: high/medium/low Focus on the most recent issues (last 20-30 messages). Don't go back further unless explicitly asked." } ``` ### Step 2: Present Findings to User After gathering behaviors (from arguments or agent), present to user using AskUserQuestion: **Question 1: Which behaviors to hookify?** - Header: "Create Rules" - multiSelect: true - Options: List each detected behavior (max 4) - Label: Short description (e.g., "Block rm -rf") - Description: Why it's problematic **Question 2: For each selected behavior, ask about action:** - "Should this block the operation or just warn?" - Options: - "Just warn" (action: warn - shows message but allows) - "Block operation" (action: block - prevents execution) **Question 3: Ask for example patterns:** - "What patterns should trigger this rule?" 
- Show detected patterns - Allow user to refine or add more ### Step 3: Generate Rule Files For each confirmed behavior, create a `.claude/hookify.{rule-name}.local.md` file: **Rule naming convention:** - Use kebab-case - Be descriptive: `block-dangerous-rm`, `warn-console-log`, `require-tests-before-stop` - Start with action verb: block, warn, prevent, require **File format:** ```markdown --- name: {rule-name} enabled: true event: {bash|file|stop|prompt|all} pattern: {regex pattern} action: {warn|block} --- {Message to show Claude when rule triggers} ``` **Action values:** - `warn`: Show message but allow operation (default) - `block`: Prevent operation or stop session **For more complex rules (multiple conditions):** ```markdown --- name: {rule-name} enabled: true event: file conditions: - field: file_path operator: regex_match pattern: \.env$ - field: new_text operator: contains pattern: API_KEY --- {Warning message} ``` ### Step 4: Create Files and Confirm **IMPORTANT**: Rule files must be created in the current working directory's `.claude/` folder, NOT the plugin directory. Use the current working directory (where Claude Code was started) as the base path. 1. Check if `.claude/` directory exists in current working directory - If not, create it first with: `mkdir -p .claude` 2. Use Write tool to create each `.claude/hookify.{name}.local.md` file - Use relative path from current working directory: `.claude/hookify.{name}.local.md` - The path should resolve to the project's .claude directory, not the plugin's 3. Show user what was created: ``` Created 3 hookify rules: - .claude/hookify.dangerous-rm.local.md - .claude/hookify.console-log.local.md - .claude/hookify.sensitive-files.local.md These rules will trigger on: - dangerous-rm: Bash commands matching "rm -rf" - console-log: Edits adding console.log statements - sensitive-files: Edits to .env or credentials files ``` 4. Verify files were created in the correct location by listing them 5. 
Inform user: **"Rules are active immediately - no restart needed!"** The hookify hooks are already loaded and will read your new rules on the next tool use. ## Event Types Reference - **bash**: Matches Bash tool commands - **file**: Matches Edit, Write, MultiEdit tools - **stop**: Matches when agent wants to stop (use for completion checks) - **prompt**: Matches when user submits prompts - **all**: Matches all events ## Pattern Writing Tips **Bash patterns:** - Match dangerous commands: `rm\s+-rf|chmod\s+777|dd\s+if=` - Match specific tools: `npm\s+install\s+|pip\s+install` **File patterns:** - Match code patterns: `console\.log\(|eval\(|innerHTML\s*=` - Match file paths: `\.env$|\.git/|node_modules/` **Stop patterns:** - Check for missing steps: (check transcript or completion criteria) ## Example Workflow **User says**: "/hookify Don't use rm -rf without asking me first" **Your response**: 1. Analyze: User wants to prevent rm -rf commands 2. Ask: "Should I block this command or just warn you?" 3. User selects: "Just warn" 4. Create `.claude/hookify.dangerous-rm.local.md`: ```markdown --- name: warn-dangerous-rm enabled: true event: bash pattern: rm\s+-rf --- ⚠️ **Dangerous rm command detected** You requested to be warned before using rm -rf. Please verify the path is correct. ``` 5. Confirm: "Created hookify rule. It's active immediately - try triggering it!" ## Important Notes - **No restart needed**: Rules take effect immediately on the next tool use - **File location**: Create files in project's `.claude/` directory (current working directory), NOT the plugin's .claude/ - **Regex syntax**: Use Python regex syntax (raw strings, no need to escape in YAML) - **Action types**: Rules can `warn` (default) or `block` operations - **Testing**: Test rules immediately after creating them ## Troubleshooting **If rule file creation fails:** 1. Check current working directory with pwd 2. Ensure `.claude/` directory exists (create with mkdir if needed) 3. 
Use absolute path if needed: `{cwd}/.claude/hookify.{name}.local.md` 4. Verify file was created with Glob or ls **If rule doesn't trigger after creation:** 1. Verify file is in project `.claude/` not plugin `.claude/` 2. Check file with Read tool to ensure pattern is correct 3. Test pattern with: `python3 -c "import re; print(re.search(r'pattern', 'test text'))"` 4. Verify `enabled: true` in frontmatter 5. Remember: Rules work immediately, no restart needed **If blocking seems too strict:** 1. Change `action: block` to `action: warn` in the rule file 2. Or adjust the pattern to be more specific 3. Changes take effect on next tool use Use TodoWrite to track your progress through the steps. ================================================ FILE: plugins/hookify/commands/list.md ================================================ --- description: List all configured hookify rules allowed-tools: ["Glob", "Read", "Skill"] --- # List Hookify Rules **Load hookify:writing-rules skill first** to understand rule format. Show all configured hookify rules in the project. ## Steps 1. Use Glob tool to find all hookify rule files: ``` pattern: ".claude/hookify.*.local.md" ``` 2. For each file found: - Use Read tool to read the file - Extract frontmatter fields: name, enabled, event, pattern - Extract message preview (first 100 chars) 3. Present results in a table: ``` ## Configured Hookify Rules | Name | Enabled | Event | Pattern | File | |------|---------|-------|---------|------| | warn-dangerous-rm | ✅ Yes | bash | rm\s+-rf | hookify.dangerous-rm.local.md | | warn-console-log | ✅ Yes | file | console\.log\( | hookify.console-log.local.md | | check-tests | ❌ No | stop | .* | hookify.require-tests.local.md | **Total**: 3 rules (2 enabled, 1 disabled) ``` 4. For each rule, show a brief preview: ``` ### warn-dangerous-rm **Event**: bash **Pattern**: `rm\s+-rf` **Message**: "⚠️ **Dangerous rm command detected!** This command could delete..." 
**Status**: ✅ Active **File**: .claude/hookify.dangerous-rm.local.md ``` 5. Add helpful footer: ``` --- To modify a rule: Edit the .local.md file directly To disable a rule: Set `enabled: false` in frontmatter To enable a rule: Set `enabled: true` in frontmatter To delete a rule: Remove the .local.md file To create a rule: Use `/hookify` command **Remember**: Changes take effect immediately - no restart needed ``` ## If No Rules Found If no hookify rules exist: ``` ## No Hookify Rules Configured You haven't created any hookify rules yet. To get started: 1. Use `/hookify` to analyze conversation and create rules 2. Or manually create `.claude/hookify.my-rule.local.md` files 3. See `/hookify:help` for documentation Example: ``` /hookify Warn me when I use console.log ``` Check `${CLAUDE_PLUGIN_ROOT}/examples/` for example rule files. ``` ================================================ FILE: plugins/hookify/core/__init__.py ================================================ ================================================ FILE: plugins/hookify/core/config_loader.py ================================================ #!/usr/bin/env python3 """Configuration loader for hookify plugin. Loads and parses .claude/hookify.*.local.md files. """ import os import sys import glob import re from typing import List, Optional, Dict, Any from dataclasses import dataclass, field @dataclass class Condition: """A single condition for matching.""" field: str # "command", "new_text", "old_text", "file_path", etc. operator: str # "regex_match", "contains", "equals", etc. pattern: str # Pattern to match @classmethod def from_dict(cls, data: Dict[str, Any]) -> 'Condition': """Create Condition from dict.""" return cls( field=data.get('field', ''), operator=data.get('operator', 'regex_match'), pattern=data.get('pattern', '') ) @dataclass class Rule: """A hookify rule.""" name: str enabled: bool event: str # "bash", "file", "stop", "all", etc. 
pattern: Optional[str] = None # Simple pattern (legacy) conditions: List[Condition] = field(default_factory=list) action: str = "warn" # "warn" or "block" (future) tool_matcher: Optional[str] = None # Override tool matching message: str = "" # Message body from markdown @classmethod def from_dict(cls, frontmatter: Dict[str, Any], message: str) -> 'Rule': """Create Rule from frontmatter dict and message body.""" # Handle both simple pattern and complex conditions conditions = [] # New style: explicit conditions list if 'conditions' in frontmatter: cond_list = frontmatter['conditions'] if isinstance(cond_list, list): conditions = [Condition.from_dict(c) for c in cond_list] # Legacy style: simple pattern field simple_pattern = frontmatter.get('pattern') if simple_pattern and not conditions: # Convert simple pattern to condition # Infer field from event event = frontmatter.get('event', 'all') if event == 'bash': field = 'command' elif event == 'file': field = 'new_text' else: field = 'content' conditions = [Condition( field=field, operator='regex_match', pattern=simple_pattern )] return cls( name=frontmatter.get('name', 'unnamed'), enabled=frontmatter.get('enabled', True), event=frontmatter.get('event', 'all'), pattern=simple_pattern, conditions=conditions, action=frontmatter.get('action', 'warn'), tool_matcher=frontmatter.get('tool_matcher'), message=message.strip() ) def extract_frontmatter(content: str) -> tuple[Dict[str, Any], str]: """Extract YAML frontmatter and message body from markdown. Returns (frontmatter_dict, message_body). Supports multi-line dictionary items in lists by preserving indentation. 
""" if not content.startswith('---'): return {}, content # Split on --- markers parts = content.split('---', 2) if len(parts) < 3: return {}, content frontmatter_text = parts[1] message = parts[2].strip() # Simple YAML parser that handles indented list items frontmatter = {} lines = frontmatter_text.split('\n') current_key = None current_list = [] current_dict = {} in_list = False in_dict_item = False for line in lines: # Skip empty lines and comments stripped = line.strip() if not stripped or stripped.startswith('#'): continue # Check indentation level indent = len(line) - len(line.lstrip()) # Top-level key (no indentation or minimal) if indent == 0 and ':' in line and not line.strip().startswith('-'): # Save previous list/dict if any if in_list and current_key: if in_dict_item and current_dict: current_list.append(current_dict) current_dict = {} frontmatter[current_key] = current_list in_list = False in_dict_item = False current_list = [] key, value = line.split(':', 1) key = key.strip() value = value.strip() if not value: # Empty value - list or nested structure follows current_key = key in_list = True current_list = [] else: # Simple key-value pair value = value.strip('"').strip("'") if value.lower() == 'true': value = True elif value.lower() == 'false': value = False frontmatter[key] = value # List item (starts with -) elif stripped.startswith('-') and in_list: # Save previous dict item if any if in_dict_item and current_dict: current_list.append(current_dict) current_dict = {} item_text = stripped[1:].strip() # Check if this is an inline dict (key: value on same line) if ':' in item_text and ',' in item_text: # Inline comma-separated dict: "- field: command, operator: regex_match" item_dict = {} for part in item_text.split(','): if ':' in part: k, v = part.split(':', 1) item_dict[k.strip()] = v.strip().strip('"').strip("'") current_list.append(item_dict) in_dict_item = False elif ':' in item_text: # Start of multi-line dict item: "- field: command" 
in_dict_item = True k, v = item_text.split(':', 1) current_dict = {k.strip(): v.strip().strip('"').strip("'")} else: # Simple list item current_list.append(item_text.strip('"').strip("'")) in_dict_item = False # Continuation of dict item (indented under list item) elif indent > 2 and in_dict_item and ':' in line: # This is a field of the current dict item k, v = stripped.split(':', 1) current_dict[k.strip()] = v.strip().strip('"').strip("'") # Save final list/dict if any if in_list and current_key: if in_dict_item and current_dict: current_list.append(current_dict) frontmatter[current_key] = current_list return frontmatter, message def load_rules(event: Optional[str] = None) -> List[Rule]: """Load all hookify rules from .claude directory. Args: event: Optional event filter ("bash", "file", "stop", etc.) Returns: List of enabled Rule objects matching the event. """ rules = [] # Find all hookify.*.local.md files pattern = os.path.join('.claude', 'hookify.*.local.md') files = glob.glob(pattern) for file_path in files: try: rule = load_rule_file(file_path) if not rule: continue # Filter by event if specified if event: if rule.event != 'all' and rule.event != event: continue # Only include enabled rules if rule.enabled: rules.append(rule) except (IOError, OSError, PermissionError) as e: # File I/O errors - log and continue print(f"Warning: Failed to read {file_path}: {e}", file=sys.stderr) continue except (ValueError, KeyError, AttributeError, TypeError) as e: # Parsing errors - log and continue print(f"Warning: Failed to parse {file_path}: {e}", file=sys.stderr) continue except Exception as e: # Unexpected errors - log with type details print(f"Warning: Unexpected error loading {file_path} ({type(e).__name__}): {e}", file=sys.stderr) continue return rules def load_rule_file(file_path: str) -> Optional[Rule]: """Load a single rule file. Returns: Rule object or None if file is invalid. 
""" try: with open(file_path, 'r') as f: content = f.read() frontmatter, message = extract_frontmatter(content) if not frontmatter: print(f"Warning: {file_path} missing YAML frontmatter (must start with ---)", file=sys.stderr) return None rule = Rule.from_dict(frontmatter, message) return rule except (IOError, OSError, PermissionError) as e: print(f"Error: Cannot read {file_path}: {e}", file=sys.stderr) return None except (ValueError, KeyError, AttributeError, TypeError) as e: print(f"Error: Malformed rule file {file_path}: {e}", file=sys.stderr) return None except UnicodeDecodeError as e: print(f"Error: Invalid encoding in {file_path}: {e}", file=sys.stderr) return None except Exception as e: print(f"Error: Unexpected error parsing {file_path} ({type(e).__name__}): {e}", file=sys.stderr) return None # For testing if __name__ == '__main__': import sys # Test frontmatter parsing test_content = """--- name: test-rule enabled: true event: bash pattern: "rm -rf" --- ⚠️ Dangerous command detected! """ fm, msg = extract_frontmatter(test_content) print("Frontmatter:", fm) print("Message:", msg) rule = Rule.from_dict(fm, msg) print("Rule:", rule) ================================================ FILE: plugins/hookify/core/rule_engine.py ================================================ #!/usr/bin/env python3 """Rule evaluation engine for hookify plugin.""" import re import sys from functools import lru_cache from typing import List, Dict, Any, Optional # Import from local module from hookify.core.config_loader import Rule, Condition # Cache compiled regexes (max 128 patterns) @lru_cache(maxsize=128) def compile_regex(pattern: str) -> re.Pattern: """Compile regex pattern with caching. 
Args: pattern: Regex pattern string Returns: Compiled regex pattern """ return re.compile(pattern, re.IGNORECASE) class RuleEngine: """Evaluates rules against hook input data.""" def __init__(self): """Initialize rule engine.""" # No need for instance cache anymore - using global lru_cache pass def evaluate_rules(self, rules: List[Rule], input_data: Dict[str, Any]) -> Dict[str, Any]: """Evaluate all rules and return combined results. Checks all rules and accumulates matches. Blocking rules take priority over warning rules. All matching rule messages are combined. Args: rules: List of Rule objects to evaluate input_data: Hook input JSON (tool_name, tool_input, etc.) Returns: Response dict with systemMessage, hookSpecificOutput, etc. Empty dict {} if no rules match. """ hook_event = input_data.get('hook_event_name', '') blocking_rules = [] warning_rules = [] for rule in rules: if self._rule_matches(rule, input_data): if rule.action == 'block': blocking_rules.append(rule) else: warning_rules.append(rule) # If any blocking rules matched, block the operation if blocking_rules: messages = [f"**[{r.name}]**\n{r.message}" for r in blocking_rules] combined_message = "\n\n".join(messages) # Use appropriate blocking format based on event type if hook_event == 'Stop': return { "decision": "block", "reason": combined_message, "systemMessage": combined_message } elif hook_event in ['PreToolUse', 'PostToolUse']: return { "hookSpecificOutput": { "hookEventName": hook_event, "permissionDecision": "deny" }, "systemMessage": combined_message } else: # For other events, just show message return { "systemMessage": combined_message } # If only warnings, show them but allow operation if warning_rules: messages = [f"**[{r.name}]**\n{r.message}" for r in warning_rules] return { "systemMessage": "\n\n".join(messages) } # No matches - allow operation return {} def _rule_matches(self, rule: Rule, input_data: Dict[str, Any]) -> bool: """Check if rule matches input data. 
Args: rule: Rule to evaluate input_data: Hook input data Returns: True if rule matches, False otherwise """ # Extract tool information tool_name = input_data.get('tool_name', '') tool_input = input_data.get('tool_input', {}) # Check tool matcher if specified if rule.tool_matcher: if not self._matches_tool(rule.tool_matcher, tool_name): return False # If no conditions, don't match # (Rules must have at least one condition to be valid) if not rule.conditions: return False # All conditions must match for condition in rule.conditions: if not self._check_condition(condition, tool_name, tool_input, input_data): return False return True def _matches_tool(self, matcher: str, tool_name: str) -> bool: """Check if tool_name matches the matcher pattern. Args: matcher: Pattern like "Bash", "Edit|Write", "*" tool_name: Actual tool name Returns: True if matches """ if matcher == '*': return True # Split on | for OR matching patterns = matcher.split('|') return tool_name in patterns def _check_condition(self, condition: Condition, tool_name: str, tool_input: Dict[str, Any], input_data: Dict[str, Any] = None) -> bool: """Check if a single condition matches. Args: condition: Condition to check tool_name: Tool being used tool_input: Tool input dict input_data: Full hook input data (for Stop events, etc.) 
Returns: True if condition matches """ # Extract the field value to check field_value = self._extract_field(condition.field, tool_name, tool_input, input_data) if field_value is None: return False # Apply operator operator = condition.operator pattern = condition.pattern if operator == 'regex_match': return self._regex_match(pattern, field_value) elif operator == 'contains': return pattern in field_value elif operator == 'equals': return pattern == field_value elif operator == 'not_contains': return pattern not in field_value elif operator == 'starts_with': return field_value.startswith(pattern) elif operator == 'ends_with': return field_value.endswith(pattern) else: # Unknown operator return False def _extract_field(self, field: str, tool_name: str, tool_input: Dict[str, Any], input_data: Dict[str, Any] = None) -> Optional[str]: """Extract field value from tool input or hook input data. Args: field: Field name like "command", "new_text", "file_path", "reason", "transcript" tool_name: Tool being used (may be empty for Stop events) tool_input: Tool input dict input_data: Full hook input (for accessing transcript_path, reason, etc.) 
Returns: Field value as string, or None if not found """ # Direct tool_input fields if field in tool_input: value = tool_input[field] if isinstance(value, str): return value return str(value) # For Stop events and other non-tool events, check input_data if input_data: # Stop event specific fields if field == 'reason': return input_data.get('reason', '') elif field == 'transcript': # Read transcript file if path provided transcript_path = input_data.get('transcript_path') if transcript_path: try: with open(transcript_path, 'r') as f: return f.read() except FileNotFoundError: print(f"Warning: Transcript file not found: {transcript_path}", file=sys.stderr) return '' except PermissionError: print(f"Warning: Permission denied reading transcript: {transcript_path}", file=sys.stderr) return '' except (IOError, OSError) as e: print(f"Warning: Error reading transcript {transcript_path}: {e}", file=sys.stderr) return '' except UnicodeDecodeError as e: print(f"Warning: Encoding error in transcript {transcript_path}: {e}", file=sys.stderr) return '' elif field == 'user_prompt': # For UserPromptSubmit events return input_data.get('user_prompt', '') # Handle special cases by tool type if tool_name == 'Bash': if field == 'command': return tool_input.get('command', '') elif tool_name in ['Write', 'Edit']: if field == 'content': # Write uses 'content', Edit has 'new_string' return tool_input.get('content') or tool_input.get('new_string', '') elif field == 'new_text' or field == 'new_string': return tool_input.get('new_string', '') elif field == 'old_text' or field == 'old_string': return tool_input.get('old_string', '') elif field == 'file_path': return tool_input.get('file_path', '') elif tool_name == 'MultiEdit': if field == 'file_path': return tool_input.get('file_path', '') elif field in ['new_text', 'content']: # Concatenate all edits edits = tool_input.get('edits', []) return ' '.join(e.get('new_string', '') for e in edits) return None def _regex_match(self, pattern: str, 
text: str) -> bool: """Check if pattern matches text using regex. Args: pattern: Regex pattern text: Text to match against Returns: True if pattern matches """ try: # Use cached compiled regex (LRU cache with max 128 patterns) regex = compile_regex(pattern) return bool(regex.search(text)) except re.error as e: print(f"Invalid regex pattern '{pattern}': {e}", file=sys.stderr) return False # For testing if __name__ == '__main__': from hookify.core.config_loader import Condition, Rule # Test rule evaluation rule = Rule( name="test-rm", enabled=True, event="bash", conditions=[ Condition(field="command", operator="regex_match", pattern=r"rm\s+-rf") ], message="Dangerous rm command!" ) engine = RuleEngine() # Test matching input test_input = { "tool_name": "Bash", "tool_input": { "command": "rm -rf /tmp/test" } } result = engine.evaluate_rules([rule], test_input) print("Match result:", result) # Test non-matching input test_input2 = { "tool_name": "Bash", "tool_input": { "command": "ls -la" } } result2 = engine.evaluate_rules([rule], test_input2) print("Non-match result:", result2) ================================================ FILE: plugins/hookify/examples/console-log-warning.local.md ================================================ --- name: warn-console-log enabled: true event: file pattern: console\.log\( action: warn --- 🔍 **Console.log detected** You're adding a console.log statement. Please consider: - Is this for debugging or should it be proper logging? - Will this ship to production? - Should this use a logging library instead? ================================================ FILE: plugins/hookify/examples/dangerous-rm.local.md ================================================ --- name: block-dangerous-rm enabled: true event: bash pattern: rm\s+-rf action: block --- ⚠️ **Dangerous rm command detected!** This command could delete important files. 
Please: - Verify the path is correct - Consider using a safer approach - Make sure you have backups ================================================ FILE: plugins/hookify/examples/require-tests-stop.local.md ================================================ --- name: require-tests-run enabled: false event: stop action: block conditions: - field: transcript operator: not_contains pattern: npm test|pytest|cargo test --- **Tests not detected in transcript!** Before stopping, please run tests to verify your changes work correctly. Look for test commands like: - `npm test` - `pytest` - `cargo test` **Note:** This rule blocks stopping if no test commands appear in the transcript. Enable this rule only when you want strict test enforcement. ================================================ FILE: plugins/hookify/examples/sensitive-files-warning.local.md ================================================ --- name: warn-sensitive-files enabled: true event: file action: warn conditions: - field: file_path operator: regex_match pattern: \.env$|\.env\.|credentials|secrets --- 🔐 **Sensitive file detected** You're editing a file that may contain sensitive data: - Ensure credentials are not hardcoded - Use environment variables for secrets - Verify this file is in .gitignore - Consider using a secrets manager ================================================ FILE: plugins/hookify/hooks/__init__.py ================================================ ================================================ FILE: plugins/hookify/hooks/hooks.json ================================================ { "description": "Hookify plugin - User-configurable hooks from .local.md files", "hooks": { "PreToolUse": [ { "hooks": [ { "type": "command", "command": "python3 ${CLAUDE_PLUGIN_ROOT}/hooks/pretooluse.py", "timeout": 10 } ] } ], "PostToolUse": [ { "hooks": [ { "type": "command", "command": "python3 ${CLAUDE_PLUGIN_ROOT}/hooks/posttooluse.py", "timeout": 10 } ] } ], "Stop": [ { "hooks": [ { "type": "command", 
"command": "python3 ${CLAUDE_PLUGIN_ROOT}/hooks/stop.py", "timeout": 10 } ] } ], "UserPromptSubmit": [ { "hooks": [ { "type": "command", "command": "python3 ${CLAUDE_PLUGIN_ROOT}/hooks/userpromptsubmit.py", "timeout": 10 } ] } ] } } ================================================ FILE: plugins/hookify/hooks/posttooluse.py ================================================ #!/usr/bin/env python3 """PostToolUse hook executor for hookify plugin. This script is called by Claude Code after a tool executes. It reads .claude/hookify.*.local.md files and evaluates rules. """ import os import sys import json # CRITICAL: Add plugin root to Python path for imports PLUGIN_ROOT = os.environ.get('CLAUDE_PLUGIN_ROOT') if PLUGIN_ROOT: parent_dir = os.path.dirname(PLUGIN_ROOT) if parent_dir not in sys.path: sys.path.insert(0, parent_dir) if PLUGIN_ROOT not in sys.path: sys.path.insert(0, PLUGIN_ROOT) try: from hookify.core.config_loader import load_rules from hookify.core.rule_engine import RuleEngine except ImportError as e: error_msg = {"systemMessage": f"Hookify import error: {e}"} print(json.dumps(error_msg), file=sys.stdout) sys.exit(0) def main(): """Main entry point for PostToolUse hook.""" try: # Read input from stdin input_data = json.load(sys.stdin) # Determine event type based on tool tool_name = input_data.get('tool_name', '') event = None if tool_name == 'Bash': event = 'bash' elif tool_name in ['Edit', 'Write', 'MultiEdit']: event = 'file' # Load rules rules = load_rules(event=event) # Evaluate rules engine = RuleEngine() result = engine.evaluate_rules(rules, input_data) # Always output JSON (even if empty) print(json.dumps(result), file=sys.stdout) except Exception as e: error_output = { "systemMessage": f"Hookify error: {str(e)}" } print(json.dumps(error_output), file=sys.stdout) finally: # ALWAYS exit 0 sys.exit(0) if __name__ == '__main__': main() ================================================ FILE: plugins/hookify/hooks/pretooluse.py 
================================================ #!/usr/bin/env python3 """PreToolUse hook executor for hookify plugin. This script is called by Claude Code before any tool executes. It reads .claude/hookify.*.local.md files and evaluates rules. """ import os import sys import json # CRITICAL: Add plugin root to Python path for imports # We need to add the parent of the plugin directory so Python can find "hookify" package PLUGIN_ROOT = os.environ.get('CLAUDE_PLUGIN_ROOT') if PLUGIN_ROOT: # Add the parent directory of the plugin parent_dir = os.path.dirname(PLUGIN_ROOT) if parent_dir not in sys.path: sys.path.insert(0, parent_dir) # Also add PLUGIN_ROOT itself in case we have other scripts if PLUGIN_ROOT not in sys.path: sys.path.insert(0, PLUGIN_ROOT) try: from hookify.core.config_loader import load_rules from hookify.core.rule_engine import RuleEngine except ImportError as e: # If imports fail, allow operation and log error error_msg = {"systemMessage": f"Hookify import error: {e}"} print(json.dumps(error_msg), file=sys.stdout) sys.exit(0) def main(): """Main entry point for PreToolUse hook.""" try: # Read input from stdin input_data = json.load(sys.stdin) # Determine event type for filtering # For PreToolUse, we use tool_name to determine "bash" vs "file" event tool_name = input_data.get('tool_name', '') event = None if tool_name == 'Bash': event = 'bash' elif tool_name in ['Edit', 'Write', 'MultiEdit']: event = 'file' # Load rules rules = load_rules(event=event) # Evaluate rules engine = RuleEngine() result = engine.evaluate_rules(rules, input_data) # Always output JSON (even if empty) print(json.dumps(result), file=sys.stdout) except Exception as e: # On any error, allow the operation and log error_output = { "systemMessage": f"Hookify error: {str(e)}" } print(json.dumps(error_output), file=sys.stdout) finally: # ALWAYS exit 0 - never block operations due to hook errors sys.exit(0) if __name__ == '__main__': main() 
================================================ FILE: plugins/hookify/hooks/stop.py ================================================ #!/usr/bin/env python3 """Stop hook executor for hookify plugin. This script is called by Claude Code when agent wants to stop. It reads .claude/hookify.*.local.md files and evaluates stop rules. """ import os import sys import json # CRITICAL: Add plugin root to Python path for imports PLUGIN_ROOT = os.environ.get('CLAUDE_PLUGIN_ROOT') if PLUGIN_ROOT: parent_dir = os.path.dirname(PLUGIN_ROOT) if parent_dir not in sys.path: sys.path.insert(0, parent_dir) if PLUGIN_ROOT not in sys.path: sys.path.insert(0, PLUGIN_ROOT) try: from hookify.core.config_loader import load_rules from hookify.core.rule_engine import RuleEngine except ImportError as e: error_msg = {"systemMessage": f"Hookify import error: {e}"} print(json.dumps(error_msg), file=sys.stdout) sys.exit(0) def main(): """Main entry point for Stop hook.""" try: # Read input from stdin input_data = json.load(sys.stdin) # Load stop rules rules = load_rules(event='stop') # Evaluate rules engine = RuleEngine() result = engine.evaluate_rules(rules, input_data) # Always output JSON (even if empty) print(json.dumps(result), file=sys.stdout) except Exception as e: # On any error, allow the operation error_output = { "systemMessage": f"Hookify error: {str(e)}" } print(json.dumps(error_output), file=sys.stdout) finally: # ALWAYS exit 0 sys.exit(0) if __name__ == '__main__': main() ================================================ FILE: plugins/hookify/hooks/userpromptsubmit.py ================================================ #!/usr/bin/env python3 """UserPromptSubmit hook executor for hookify plugin. This script is called by Claude Code when user submits a prompt. It reads .claude/hookify.*.local.md files and evaluates rules. 
""" import os import sys import json # CRITICAL: Add plugin root to Python path for imports PLUGIN_ROOT = os.environ.get('CLAUDE_PLUGIN_ROOT') if PLUGIN_ROOT: parent_dir = os.path.dirname(PLUGIN_ROOT) if parent_dir not in sys.path: sys.path.insert(0, parent_dir) if PLUGIN_ROOT not in sys.path: sys.path.insert(0, PLUGIN_ROOT) try: from hookify.core.config_loader import load_rules from hookify.core.rule_engine import RuleEngine except ImportError as e: error_msg = {"systemMessage": f"Hookify import error: {e}"} print(json.dumps(error_msg), file=sys.stdout) sys.exit(0) def main(): """Main entry point for UserPromptSubmit hook.""" try: # Read input from stdin input_data = json.load(sys.stdin) # Load user prompt rules rules = load_rules(event='prompt') # Evaluate rules engine = RuleEngine() result = engine.evaluate_rules(rules, input_data) # Always output JSON (even if empty) print(json.dumps(result), file=sys.stdout) except Exception as e: error_output = { "systemMessage": f"Hookify error: {str(e)}" } print(json.dumps(error_output), file=sys.stdout) finally: # ALWAYS exit 0 sys.exit(0) if __name__ == '__main__': main() ================================================ FILE: plugins/hookify/matchers/__init__.py ================================================ ================================================ FILE: plugins/hookify/skills/writing-rules/SKILL.md ================================================ --- name: Writing Hookify Rules description: This skill should be used when the user asks to "create a hookify rule", "write a hook rule", "configure hookify", "add a hookify rule", or needs guidance on hookify rule syntax and patterns. version: 0.1.0 --- # Writing Hookify Rules ## Overview Hookify rules are markdown files with YAML frontmatter that define patterns to watch for and messages to show when those patterns match. Rules are stored in `.claude/hookify.{rule-name}.local.md` files. 
## Rule File Format ### Basic Structure ```markdown --- name: rule-identifier enabled: true event: bash|file|stop|prompt|all pattern: regex-pattern-here --- Message to show Claude when this rule triggers. Can include markdown formatting, warnings, suggestions, etc. ``` ### Frontmatter Fields **name** (required): Unique identifier for the rule - Use kebab-case: `warn-dangerous-rm`, `block-console-log` - Be descriptive and action-oriented - Start with verb: warn, prevent, block, require, check **enabled** (required): Boolean to activate/deactivate - `true`: Rule is active - `false`: Rule is disabled (won't trigger) - Can toggle without deleting rule **event** (required): Which hook event to trigger on - `bash`: Bash tool commands - `file`: Edit, Write, MultiEdit tools - `stop`: When agent wants to stop - `prompt`: When user submits a prompt - `all`: All events **action** (optional): What to do when rule matches - `warn`: Show message but allow operation (default) - `block`: Prevent operation (PreToolUse) or stop session (Stop events) - If omitted, defaults to `warn` **pattern** (simple format): Regex pattern to match - Used for simple single-condition rules - Matches against command (bash) or new_text (file) - Python regex syntax **Example:** ```yaml event: bash pattern: rm\s+-rf ``` ### Advanced Format (Multiple Conditions) For complex rules with multiple conditions: ```markdown --- name: warn-env-file-edits enabled: true event: file conditions: - field: file_path operator: regex_match pattern: \.env$ - field: new_text operator: contains pattern: API_KEY --- You're adding an API key to a .env file. Ensure this file is in .gitignore! 
``` **Condition fields:** - `field`: Which field to check - For bash: `command` - For file: `file_path`, `new_text`, `old_text`, `content` - `operator`: How to match - `regex_match`: Regex pattern matching - `contains`: Substring check - `equals`: Exact match - `not_contains`: Substring must NOT be present - `starts_with`: Prefix check - `ends_with`: Suffix check - `pattern`: Pattern or string to match **All conditions must match for rule to trigger.** ## Message Body The markdown content after frontmatter is shown to Claude when the rule triggers. **Good messages:** - Explain what was detected - Explain why it's problematic - Suggest alternatives or best practices - Use formatting for clarity (bold, lists, etc.) **Example:** ```markdown ⚠️ **Console.log detected!** You're adding console.log to production code. **Why this matters:** - Debug logs shouldn't ship to production - Console.log can expose sensitive data - Impacts browser performance **Alternatives:** - Use a proper logging library - Remove before committing - Use conditional debug builds ``` ## Event Type Guide ### bash Events Match Bash command patterns: ```markdown --- event: bash pattern: sudo\s+|rm\s+-rf|chmod\s+777 --- Dangerous command detected! ``` **Common patterns:** - Dangerous commands: `rm\s+-rf`, `dd\s+if=`, `mkfs` - Privilege escalation: `sudo\s+`, `su\s+` - Permission issues: `chmod\s+777`, `chown\s+root` ### file Events Match Edit/Write/MultiEdit operations: ```markdown --- event: file pattern: console\.log\(|eval\(|innerHTML\s*= --- Potentially problematic code pattern detected! ``` **Match on different fields:** ```markdown --- event: file conditions: - field: file_path operator: regex_match pattern: \.tsx?$ - field: new_text operator: regex_match pattern: console\.log\( --- Console.log in TypeScript file! 
``` **Common patterns:** - Debug code: `console\.log\(`, `debugger`, `print\(` - Security risks: `eval\(`, `innerHTML\s*=`, `dangerouslySetInnerHTML` - Sensitive files: `\.env$`, `credentials`, `\.pem$` - Generated files: `node_modules/`, `dist/`, `build/` ### stop Events Match when agent wants to stop (completion checks): ```markdown --- event: stop pattern: .* --- Before stopping, verify: - [ ] Tests were run - [ ] Build succeeded - [ ] Documentation updated ``` **Use for:** - Reminders about required steps - Completion checklists - Process enforcement ### prompt Events Match user prompt content (advanced): ```markdown --- event: prompt conditions: - field: user_prompt operator: contains pattern: deploy to production --- Production deployment checklist: - [ ] Tests passing? - [ ] Reviewed by team? - [ ] Monitoring ready? ``` ## Pattern Writing Tips ### Regex Basics **Literal characters:** Most characters match themselves - `rm` matches "rm" - `console.log` matches "console.log" **Special characters need escaping:** - `.` (any char) → `\.` (literal dot) - `(` `)` → `\(` `\)` (literal parens) - `[` `]` → `\[` `\]` (literal brackets) **Common metacharacters:** - `\s` - whitespace (space, tab, newline) - `\d` - digit (0-9) - `\w` - word character (a-z, A-Z, 0-9, _) - `.` - any character - `+` - one or more - `*` - zero or more - `?` - zero or one - `|` - OR **Examples:** ``` rm\s+-rf Matches: rm -rf, rm -rf console\.log\( Matches: console.log( (eval|exec)\( Matches: eval( or exec( chmod\s+777 Matches: chmod 777, chmod 777 API_KEY\s*= Matches: API_KEY=, API_KEY = ``` ### Testing Patterns Test regex patterns before using: ```bash python3 -c "import re; print(re.search(r'your_pattern', 'test text'))" ``` Or use online regex testers (regex101.com with Python flavor). 
### Common Pitfalls **Too broad:** ```yaml pattern: log # Matches "log", "login", "dialog", "catalog" ``` Better: `console\.log\(|logger\.` **Too specific:** ```yaml pattern: rm -rf /tmp # Only matches exact path ``` Better: `rm\s+-rf` **Escaping issues:** - YAML quoted strings: `"pattern"` requires double backslashes `\\s` - YAML unquoted: `pattern: \s` works as-is - **Recommendation**: Use unquoted patterns in YAML ## File Organization **Location:** All rules in `.claude/` directory **Naming:** `.claude/hookify.{descriptive-name}.local.md` **Gitignore:** Add `.claude/*.local.md` to `.gitignore` **Good names:** - `hookify.dangerous-rm.local.md` - `hookify.console-log.local.md` - `hookify.require-tests.local.md` - `hookify.sensitive-files.local.md` **Bad names:** - `hookify.rule1.local.md` (not descriptive) - `hookify.md` (missing .local) - `danger.local.md` (missing hookify prefix) ## Workflow ### Creating a Rule 1. Identify unwanted behavior 2. Determine which tool is involved (Bash, Edit, etc.) 3. Choose event type (bash, file, stop, etc.) 4. Write regex pattern 5. Create `.claude/hookify.{name}.local.md` file in project root 6. Test immediately - rules are read dynamically on next tool use ### Refining a Rule 1. Edit the `.local.md` file 2. Adjust pattern or message 3. 
Test immediately - changes take effect on next tool use ### Disabling a Rule **Temporary:** Set `enabled: false` in frontmatter **Permanent:** Delete the `.local.md` file ## Examples See `${CLAUDE_PLUGIN_ROOT}/examples/` for complete examples: - `dangerous-rm.local.md` - Block dangerous rm commands - `console-log-warning.local.md` - Warn about console.log - `sensitive-files-warning.local.md` - Warn about editing .env files ## Quick Reference **Minimum viable rule:** ```markdown --- name: my-rule enabled: true event: bash pattern: dangerous_command --- Warning message here ``` **Rule with conditions:** ```markdown --- name: my-rule enabled: true event: file conditions: - field: file_path operator: regex_match pattern: \.ts$ - field: new_text operator: contains pattern: any --- Warning message ``` **Event types:** - `bash` - Bash commands - `file` - File edits - `stop` - Completion checks - `prompt` - User input - `all` - All events **Field options:** - Bash: `command` - File: `file_path`, `new_text`, `old_text`, `content` - Prompt: `user_prompt` **Operators:** - `regex_match`, `contains`, `equals`, `not_contains`, `starts_with`, `ends_with` ================================================ FILE: plugins/hookify/utils/__init__.py ================================================ ================================================ FILE: plugins/learning-output-style/.claude-plugin/plugin.json ================================================ { "name": "learning-output-style", "version": "1.0.0", "description": "Interactive learning mode that requests meaningful code contributions at decision points (mimics the unshipped Learning output style)", "author": { "name": "Boris Cherny", "email": "boris@anthropic.com" } } ================================================ FILE: plugins/learning-output-style/README.md ================================================ # Learning Style Plugin This plugin combines the unshipped Learning output style with explanatory functionality as a 
SessionStart hook. **Note:** This plugin differs from the original unshipped Learning output style by also incorporating all functionality from the [explanatory-output-style plugin](https://github.com/anthropics/claude-code/tree/main/plugins/explanatory-output-style), providing both interactive learning and educational insights. WARNING: Do not install this plugin unless you are fine with incurring the token cost of this plugin's additional instructions and the interactive nature of learning mode. ## What it does When enabled, this plugin automatically adds instructions at the start of each session that encourage Claude to: 1. **Learning Mode:** Engage you in active learning by requesting meaningful code contributions at decision points 2. **Explanatory Mode:** Provide educational insights about implementation choices and codebase patterns Instead of implementing everything automatically, Claude will: 1. Identify opportunities where you can write 5-10 lines of meaningful code 2. Focus on business logic and design choices where your input truly matters 3. Prepare the context and location for your contribution 4. Explain trade-offs and guide your implementation 5. Provide educational insights before and after writing code ## How it works The plugin uses a SessionStart hook to inject additional context into every session. This context instructs Claude to adopt an interactive teaching approach where you actively participate in writing key parts of the code. 
## When Claude requests contributions Claude will ask you to write code for: - Business logic with multiple valid approaches - Error handling strategies - Algorithm implementation choices - Data structure decisions - User experience decisions - Design patterns and architecture choices ## When Claude won't request contributions Claude will implement directly: - Boilerplate or repetitive code - Obvious implementations with no meaningful choices - Configuration or setup code - Simple CRUD operations ## Example interaction **Claude:** I've set up the authentication middleware. The session timeout behavior is a security vs. UX trade-off - should sessions auto-extend on activity, or have a hard timeout? In `auth/middleware.ts`, implement the `handleSessionTimeout()` function to define the timeout behavior. Consider: auto-extending improves UX but may leave sessions open longer; hard timeouts are more secure but might frustrate active users. **You:** [Write 5-10 lines implementing your preferred approach] ## Educational insights In addition to interactive learning, Claude will provide educational insights about implementation choices using this format: ``` `★ Insight ─────────────────────────────────────` [2-3 key educational points about the codebase or implementation] `─────────────────────────────────────────────────` ``` These insights focus on: - Specific implementation choices for your codebase - Patterns and conventions in your code - Trade-offs and design decisions - Codebase-specific details rather than general programming concepts ## Usage Once installed, the plugin activates automatically at the start of every session. No additional configuration is needed. ## Migration from Output Styles This plugin combines the unshipped "Learning" output style with the deprecated "Explanatory" output style. 
It provides an interactive learning experience where you actively contribute code at meaningful decision points, while also receiving educational insights about implementation choices. If you previously used the explanatory-output-style plugin, this learning plugin includes all of that functionality plus interactive learning features. This SessionStart hook pattern is roughly equivalent to CLAUDE.md, but it is more flexible and allows for distribution through plugins. ## Managing changes - Disable the plugin - keep the code installed on your device - Uninstall the plugin - remove the code from your device - Update the plugin - create a local copy of this plugin to personalize it - Hint: Ask Claude to read https://docs.claude.com/en/docs/claude-code/plugins.md and set it up for you! ## Philosophy Learning by doing is more effective than passive observation. This plugin transforms your interaction with Claude from "watch and learn" to "build and understand," ensuring you develop practical skills through hands-on coding of meaningful logic. ================================================ FILE: plugins/learning-output-style/hooks/hooks.json ================================================ { "description": "Learning mode hook that adds interactive learning instructions", "hooks": { "SessionStart": [ { "hooks": [ { "type": "command", "command": "${CLAUDE_PLUGIN_ROOT}/hooks-handlers/session-start.sh" } ] } ] } } ================================================ FILE: plugins/learning-output-style/hooks-handlers/session-start.sh ================================================ #!/usr/bin/env bash # Output the learning mode instructions as additionalContext # This combines the unshipped Learning output style with explanatory functionality cat << 'EOF' { "hookSpecificOutput": { "hookEventName": "SessionStart", "additionalContext": "You are in 'learning' output style mode, which combines interactive learning with educational explanations. 
This mode differs from the original unshipped Learning output style by also incorporating explanatory functionality.\n\n## Learning Mode Philosophy\n\nInstead of implementing everything yourself, identify opportunities where the user can write 5-10 lines of meaningful code that shapes the solution. Focus on business logic, design choices, and implementation strategies where their input truly matters.\n\n## When to Request User Contributions\n\nRequest code contributions for:\n- Business logic with multiple valid approaches\n- Error handling strategies\n- Algorithm implementation choices\n- Data structure decisions\n- User experience decisions\n- Design patterns and architecture choices\n\n## How to Request Contributions\n\nBefore requesting code:\n1. Create the file with surrounding context\n2. Add function signature with clear parameters/return type\n3. Include comments explaining the purpose\n4. Mark the location with TODO or clear placeholder\n\nWhen requesting:\n- Explain what you've built and WHY this decision matters\n- Reference the exact file and prepared location\n- Describe trade-offs to consider, constraints, or approaches\n- Frame it as valuable input that shapes the feature, not busy work\n- Keep requests focused (5-10 lines of code)\n\n## Example Request Pattern\n\nContext: I've set up the authentication middleware. The session timeout behavior is a security vs. UX trade-off - should sessions auto-extend on activity, or have a hard timeout? 
This affects both security posture and user experience.\n\nRequest: In auth/middleware.ts, implement the handleSessionTimeout() function to define the timeout behavior.\n\nGuidance: Consider: auto-extending improves UX but may leave sessions open longer; hard timeouts are more secure but might frustrate active users.\n\n## Balance\n\nDon't request contributions for:\n- Boilerplate or repetitive code\n- Obvious implementations with no meaningful choices\n- Configuration or setup code\n- Simple CRUD operations\n\nDo request contributions when:\n- There are meaningful trade-offs to consider\n- The decision shapes the feature's behavior\n- Multiple valid approaches exist\n- The user's domain knowledge would improve the solution\n\n## Explanatory Mode\n\nAdditionally, provide educational insights about the codebase as you help with tasks. Be clear and educational, providing helpful explanations while remaining focused on the task. Balance educational content with task completion.\n\n### Insights\nBefore and after writing code, provide brief educational explanations about implementation choices using:\n\n\"`★ Insight ─────────────────────────────────────`\n[2-3 key educational points]\n`─────────────────────────────────────────────────`\"\n\nThese insights should be included in the conversation, not in the codebase. Focus on interesting insights specific to the codebase or the code you just wrote, rather than general programming concepts. Provide insights as you write code, not just at the end." } } EOF exit 0 ================================================ FILE: plugins/plugin-dev/README.md ================================================ # Plugin Development Toolkit A comprehensive toolkit for developing Claude Code plugins with expert guidance on hooks, MCP integration, plugin structure, and marketplace publishing. ## Overview The plugin-dev toolkit provides seven specialized skills to help you build high-quality Claude Code plugins: 1. 
**Hook Development** - Advanced hooks API and event-driven automation 2. **MCP Integration** - Model Context Protocol server integration 3. **Plugin Structure** - Plugin organization and manifest configuration 4. **Plugin Settings** - Configuration patterns using .claude/plugin-name.local.md files 5. **Command Development** - Creating slash commands with frontmatter and arguments 6. **Agent Development** - Creating autonomous agents with AI-assisted generation 7. **Skill Development** - Creating skills with progressive disclosure and strong triggers Each skill follows best practices with progressive disclosure: lean core documentation, detailed references, working examples, and utility scripts. ## Guided Workflow Command ### /plugin-dev:create-plugin A comprehensive, end-to-end workflow command for creating plugins from scratch, similar to the feature-dev workflow. **8-Phase Process:** 1. **Discovery** - Understand plugin purpose and requirements 2. **Component Planning** - Determine needed skills, commands, agents, hooks, MCP 3. **Detailed Design** - Specify each component and resolve ambiguities 4. **Structure Creation** - Set up directories and manifest 5. **Component Implementation** - Create each component using AI-assisted agents 6. **Validation** - Run plugin-validator and component-specific checks 7. **Testing** - Verify plugin works in Claude Code 8. **Documentation** - Finalize README and prepare for distribution **Features:** - Asks clarifying questions at each phase - Loads relevant skills automatically - Uses agent-creator for AI-assisted agent generation - Runs validation utilities (validate-agent.sh, validate-hook-schema.sh, etc.) 
- Follows plugin-dev's own proven patterns - Guides through testing and verification **Usage:** ```bash /plugin-dev:create-plugin [optional description] # Examples: /plugin-dev:create-plugin /plugin-dev:create-plugin A plugin for managing database migrations ``` Use this workflow for structured, high-quality plugin development from concept to completion. ## Skills ### 1. Hook Development **Trigger phrases:** "create a hook", "add a PreToolUse hook", "validate tool use", "implement prompt-based hooks", "${CLAUDE_PLUGIN_ROOT}", "block dangerous commands" **What it covers:** - Prompt-based hooks (recommended) with LLM decision-making - Command hooks for deterministic validation - All hook events: PreToolUse, PostToolUse, Stop, SubagentStop, SessionStart, SessionEnd, UserPromptSubmit, PreCompact, Notification - Hook output formats and JSON schemas - Security best practices and input validation - ${CLAUDE_PLUGIN_ROOT} for portable paths **Resources:** - Core SKILL.md (1,619 words) - 3 example hook scripts (validate-write, validate-bash, load-context) - 3 reference docs: patterns, migration, advanced techniques - 3 utility scripts: validate-hook-schema.sh, test-hook.sh, hook-linter.sh **Use when:** Creating event-driven automation, validating operations, or enforcing policies in your plugin. ### 2. 
MCP Integration **Trigger phrases:** "add MCP server", "integrate MCP", "configure .mcp.json", "Model Context Protocol", "stdio/SSE/HTTP server", "connect external service" **What it covers:** - MCP server configuration (.mcp.json vs plugin.json) - All server types: stdio (local), SSE (hosted/OAuth), HTTP (REST), WebSocket (real-time) - Environment variable expansion (${CLAUDE_PLUGIN_ROOT}, user vars) - MCP tool naming and usage in commands/agents - Authentication patterns: OAuth, tokens, env vars - Integration patterns and performance optimization **Resources:** - Core SKILL.md (1,666 words) - 3 example configurations (stdio, SSE, HTTP) - 3 reference docs: server-types (~3,200w), authentication (~2,800w), tool-usage (~2,600w) **Use when:** Integrating external services, APIs, databases, or tools into your plugin. ### 3. Plugin Structure **Trigger phrases:** "plugin structure", "plugin.json manifest", "auto-discovery", "component organization", "plugin directory layout" **What it covers:** - Standard plugin directory structure and auto-discovery - plugin.json manifest format and all fields - Component organization (commands, agents, skills, hooks) - ${CLAUDE_PLUGIN_ROOT} usage throughout - File naming conventions and best practices - Minimal, standard, and advanced plugin patterns **Resources:** - Core SKILL.md (1,619 words) - 3 example structures (minimal, standard, advanced) - 2 reference docs: component-patterns, manifest-reference **Use when:** Starting a new plugin, organizing components, or configuring the plugin manifest. ### 4. 
Plugin Settings **Trigger phrases:** "plugin settings", "store plugin configuration", ".local.md files", "plugin state files", "read YAML frontmatter", "per-project plugin settings" **What it covers:** - .claude/plugin-name.local.md pattern for configuration - YAML frontmatter + markdown body structure - Parsing techniques for bash scripts (sed, awk, grep patterns) - Temporarily active hooks (flag files and quick-exit) - Real-world examples from multi-agent-swarm and ralph-wiggum plugins - Atomic file updates and validation - Gitignore and lifecycle management **Resources:** - Core SKILL.md (1,623 words) - 3 examples (read-settings hook, create-settings command, templates) - 2 reference docs: parsing-techniques, real-world-examples - 2 utility scripts: validate-settings.sh, parse-frontmatter.sh **Use when:** Making plugins configurable, storing per-project state, or implementing user preferences. ### 5. Command Development **Trigger phrases:** "create a slash command", "add a command", "command frontmatter", "define command arguments", "organize commands" **What it covers:** - Slash command structure and markdown format - YAML frontmatter fields (description, argument-hint, allowed-tools) - Dynamic arguments and file references - Bash execution for context - Command organization and namespacing - Best practices for command development **Resources:** - Core SKILL.md (1,535 words) - Examples and reference documentation - Command organization patterns **Use when:** Creating slash commands, defining command arguments, or organizing plugin commands. ### 6. 
Agent Development **Trigger phrases:** "create an agent", "add an agent", "write a subagent", "agent frontmatter", "when to use description", "agent examples", "autonomous agent" **What it covers:** - Agent file structure (YAML frontmatter + system prompt) - All frontmatter fields (name, description, model, color, tools) - Description format with blocks for reliable triggering - System prompt design patterns (analysis, generation, validation, orchestration) - AI-assisted agent generation using Claude Code's proven prompt - Validation rules and best practices - Complete production-ready agent examples **Resources:** - Core SKILL.md (1,438 words) - 2 examples: agent-creation-prompt (AI-assisted workflow), complete-agent-examples (4 full agents) - 3 reference docs: agent-creation-system-prompt (from Claude Code), system-prompt-design (~4,000w), triggering-examples (~2,500w) - 1 utility script: validate-agent.sh **Use when:** Creating autonomous agents, defining agent behavior, or implementing AI-assisted agent generation. ### 7. Skill Development **Trigger phrases:** "create a skill", "add a skill to plugin", "write a new skill", "improve skill description", "organize skill content" **What it covers:** - Skill structure (SKILL.md with YAML frontmatter) - Progressive disclosure principle (metadata → SKILL.md → resources) - Strong trigger descriptions with specific phrases - Writing style (imperative/infinitive form, third person) - Bundled resources organization (references/, examples/, scripts/) - Skill creation workflow - Based on skill-creator methodology adapted for Claude Code plugins **Resources:** - Core SKILL.md (1,232 words) - References: skill-creator methodology, plugin-dev patterns - Examples: Study plugin-dev's own skills as templates **Use when:** Creating new skills for plugins or improving existing skill quality. 
## Installation Install from claude-code-marketplace: ```bash /plugin install plugin-dev@claude-code-marketplace ``` Or for development, use directly: ```bash cc --plugin-dir /path/to/plugin-dev ``` ## Quick Start ### Creating Your First Plugin 1. **Plan your plugin structure:** - Ask: "What's the best directory structure for a plugin with commands and MCP integration?" - The plugin-structure skill will guide you 2. **Add MCP integration (if needed):** - Ask: "How do I add an MCP server for database access?" - The mcp-integration skill provides examples and patterns 3. **Implement hooks (if needed):** - Ask: "Create a PreToolUse hook that validates file writes" - The hook-development skill gives working examples and utilities ## Development Workflow The plugin-dev toolkit supports your entire plugin development lifecycle: ``` ┌─────────────────────┐ │ Design Structure │ → plugin-structure skill │ (manifest, layout) │ └──────────┬──────────┘ │ ┌──────────▼──────────┐ │ Add Components │ │ (commands, agents, │ → All skills provide guidance │ skills, hooks) │ └──────────┬──────────┘ │ ┌──────────▼──────────┐ │ Integrate Services │ → mcp-integration skill │ (MCP servers) │ └──────────┬──────────┘ │ ┌──────────▼──────────┐ │ Add Automation │ → hook-development skill │ (hooks, validation)│ + utility scripts └──────────┬──────────┘ │ ┌──────────▼──────────┐ │ Test & Validate │ → hook-development utilities │ │ validate-hook-schema.sh └──────────┬──────────┘ test-hook.sh │ hook-linter.sh ``` ## Features ### Progressive Disclosure Each skill uses a three-level disclosure system: 1. **Metadata** (always loaded): Concise descriptions with strong triggers 2. **Core SKILL.md** (when triggered): Essential API reference (~1,500-2,000 words) 3. **References/Examples** (as needed): Detailed guides, patterns, and working code This keeps Claude Code's context focused while providing deep knowledge when needed. 
### Utility Scripts The hook-development skill includes production-ready utilities: ```bash # Validate hooks.json structure ./validate-hook-schema.sh hooks/hooks.json # Test hooks before deployment ./test-hook.sh my-hook.sh test-input.json # Lint hook scripts for best practices ./hook-linter.sh my-hook.sh ``` ### Working Examples Every skill provides working examples: - **Hook Development**: 3 complete hook scripts (bash, write validation, context loading) - **MCP Integration**: 3 server configurations (stdio, SSE, HTTP) - **Plugin Structure**: 3 plugin layouts (minimal, standard, advanced) - **Plugin Settings**: 3 examples (read-settings hook, create-settings command, templates) - **Command Development**: 10 complete command examples (review, test, deploy, docs, etc.) ## Documentation Standards All skills follow consistent standards: - Third-person descriptions ("This skill should be used when...") - Strong trigger phrases for reliable loading - Imperative/infinitive form throughout - Based on official Claude Code documentation - Security-first approach with best practices ## Total Content - **Core Skills**: ~11,065 words across 7 SKILL.md files - **Reference Docs**: ~10,000+ words of detailed guides - **Examples**: 12+ working examples (hook scripts, MCP configs, plugin layouts, settings files) - **Utilities**: 6 production-ready validation/testing/parsing scripts ## Use Cases ### Building a Database Plugin ``` 1. "What's the structure for a plugin with MCP integration?" → plugin-structure skill provides layout 2. "How do I configure an stdio MCP server for PostgreSQL?" → mcp-integration skill shows configuration 3. "Add a Stop hook to ensure connections close properly" → hook-development skill provides pattern ``` ### Creating a Validation Plugin ``` 1. "Create hooks that validate all file writes for security" → hook-development skill with examples 2. "Test my hooks before deploying" → Use validate-hook-schema.sh and test-hook.sh 3. 
"Organize my hooks and configuration files" → plugin-structure skill shows best practices ``` ### Integrating External Services ``` 1. "Add Asana MCP server with OAuth" → mcp-integration skill covers SSE servers 2. "Use Asana tools in my commands" → mcp-integration tool-usage reference 3. "Structure my plugin with commands and MCP" → plugin-structure skill provides patterns ``` ## Best Practices All skills emphasize: ✅ **Security First** - Input validation in hooks - HTTPS/WSS for MCP servers - Environment variables for credentials - Principle of least privilege ✅ **Portability** - Use ${CLAUDE_PLUGIN_ROOT} everywhere - Relative paths only - Environment variable substitution ✅ **Testing** - Validate configurations before deployment - Test hooks with sample inputs - Use debug mode (`claude --debug`) ✅ **Documentation** - Clear README files - Documented environment variables - Usage examples ## Contributing This plugin is part of the claude-code-marketplace. To contribute improvements: 1. Fork the marketplace repository 2. Make changes to plugin-dev/ 3. Test locally with `cc --plugin-dir` 4. Create PR following marketplace-publishing guidelines ## Version 0.1.0 - Initial release with seven comprehensive skills and three validation agents ## Author Daisy Hollman (daisy@anthropic.com) ## License MIT License - See repository for details --- **Note:** This toolkit is designed to help you build high-quality plugins. The skills load automatically when you ask relevant questions, providing expert guidance exactly when you need it. ================================================ FILE: plugins/plugin-dev/agents/agent-creator.md ================================================ --- name: agent-creator description: Use this agent when the user asks to "create an agent", "generate an agent", "build a new agent", "make me an agent that...", or describes agent functionality they need. Trigger when user wants to create autonomous agents for plugins. 
Examples: Context: User wants to create a code review agent user: "Create an agent that reviews code for quality issues" assistant: "I'll use the agent-creator agent to generate the agent configuration." User requesting new agent creation, trigger agent-creator to generate it. Context: User describes needed functionality user: "I need an agent that generates unit tests for my code" assistant: "I'll use the agent-creator agent to create a test generation agent." User describes agent need, trigger agent-creator to build it. Context: User wants to add agent to plugin user: "Add an agent to my plugin that validates configurations" assistant: "I'll use the agent-creator agent to generate a configuration validator agent." Plugin development with agent addition, trigger agent-creator. model: sonnet color: magenta tools: ["Write", "Read"] --- You are an elite AI agent architect specializing in crafting high-performance agent configurations. Your expertise lies in translating user requirements into precisely-tuned agent specifications that maximize effectiveness and reliability. **Important Context**: You may have access to project-specific instructions from CLAUDE.md files and other context that may include coding standards, project structure, and custom requirements. Consider this context when creating agents to ensure they align with the project's established patterns and practices. When a user describes what they want an agent to do, you will: 1. **Extract Core Intent**: Identify the fundamental purpose, key responsibilities, and success criteria for the agent. Look for both explicit requirements and implicit needs. Consider any project-specific context from CLAUDE.md files. For agents that are meant to review code, you should assume that the user is asking to review recently written code and not the whole codebase, unless the user has explicitly instructed you otherwise. 2. 
**Design Expert Persona**: Create a compelling expert identity that embodies deep domain knowledge relevant to the task. The persona should inspire confidence and guide the agent's decision-making approach. 3. **Architect Comprehensive Instructions**: Develop a system prompt that: - Establishes clear behavioral boundaries and operational parameters - Provides specific methodologies and best practices for task execution - Anticipates edge cases and provides guidance for handling them - Incorporates any specific requirements or preferences mentioned by the user - Defines output format expectations when relevant - Aligns with project-specific coding standards and patterns from CLAUDE.md 4. **Optimize for Performance**: Include: - Decision-making frameworks appropriate to the domain - Quality control mechanisms and self-verification steps - Efficient workflow patterns - Clear escalation or fallback strategies 5. **Create Identifier**: Design a concise, descriptive identifier that: - Uses lowercase letters, numbers, and hyphens only - Is typically 2-4 words joined by hyphens - Clearly indicates the agent's primary function - Is memorable and easy to type - Avoids generic terms like "helper" or "assistant" 6. **Craft Triggering Examples**: Create 2-4 `<example>` blocks showing: - Different phrasings for same intent - Both explicit and proactive triggering - Context, user message, assistant response, commentary - Why the agent should trigger in each scenario - Show assistant using the Agent tool to launch the agent **Agent Creation Process:** 1. **Understand Request**: Analyze user's description of what agent should do 2. **Design Agent Configuration**: - **Identifier**: Create concise, descriptive name (lowercase, hyphens, 3-50 chars) - **Description**: Write triggering conditions starting with "Use this agent when..." 
- **Examples**: Create 2-4 `<example>` blocks with: ``` Context: [Situation that should trigger agent] user: "[User message]" assistant: "[Response before triggering]" [Why agent should trigger] assistant: "I'll use the [agent-name] agent to [what it does]." ``` - **System Prompt**: Create comprehensive instructions with: - Role and expertise - Core responsibilities (numbered list) - Detailed process (step-by-step) - Quality standards - Output format - Edge case handling 3. **Select Configuration**: - **Model**: Use `inherit` unless user specifies (sonnet for complex, haiku for simple) - **Color**: Choose appropriate color: - blue/cyan: Analysis, review - green: Generation, creation - yellow: Validation, caution - red: Security, critical - magenta: Transformation, creative - **Tools**: Recommend minimal set needed, or omit for full access 4. **Generate Agent File**: Use Write tool to create `agents/[identifier].md`: ```markdown --- name: [identifier] description: [Use this agent when... Examples: ...] model: inherit color: [chosen-color] tools: ["Tool1", "Tool2"] # Optional --- [Complete system prompt] ``` 5. 
**Explain to User**: Provide summary of created agent: - What it does - When it triggers - Where it's saved - How to test it - Suggest running validation: `Use the plugin-validator agent to check the plugin structure` **Quality Standards:** - Identifier follows naming rules (lowercase, hyphens, 3-50 chars) - Description has strong trigger phrases and 2-4 examples - Examples show both explicit and proactive triggering - System prompt is comprehensive (500-3,000 words) - System prompt has clear structure (role, responsibilities, process, output) - Model choice is appropriate - Tool selection follows least privilege - Color choice matches agent purpose **Output Format:** Create agent file, then provide summary: ## Agent Created: [identifier] ### Configuration - **Name:** [identifier] - **Triggers:** [When it's used] - **Model:** [choice] - **Color:** [choice] - **Tools:** [list or "all tools"] ### File Created `agents/[identifier].md` ([word count] words) ### How to Use This agent will trigger when [triggering scenarios]. Test it by: [suggest test scenario] Validate with: `scripts/validate-agent.sh agents/[identifier].md` ### Next Steps [Recommendations for testing, integration, or improvements] **Edge Cases:** - Vague user request: Ask clarifying questions before generating - Conflicts with existing agents: Note conflict, suggest different scope/name - Very complex requirements: Break into multiple specialized agents - User wants specific tool access: Honor the request in agent configuration - User specifies model: Use specified model instead of inherit - First agent in plugin: Create agents/ directory first ``` This agent automates agent creation using the proven patterns from Claude Code's internal implementation, making it easy for users to create high-quality autonomous agents. 
================================================ FILE: plugins/plugin-dev/agents/plugin-validator.md ================================================ --- name: plugin-validator description: Use this agent when the user asks to "validate my plugin", "check plugin structure", "verify plugin is correct", "validate plugin.json", "check plugin files", or mentions plugin validation. Also trigger proactively after user creates or modifies plugin components. Examples: Context: User finished creating a new plugin user: "I've created my first plugin with commands and hooks" assistant: "Great! Let me validate the plugin structure." Plugin created, proactively validate to catch issues early. assistant: "I'll use the plugin-validator agent to check the plugin." Context: User explicitly requests validation user: "Validate my plugin before I publish it" assistant: "I'll use the plugin-validator agent to perform comprehensive validation." Explicit validation request triggers the agent. Context: User modified plugin.json user: "I've updated the plugin manifest" assistant: "Let me validate the changes." Manifest modified, validate to ensure correctness. assistant: "I'll use the plugin-validator agent to check the manifest." model: inherit color: yellow tools: ["Read", "Grep", "Glob", "Bash"] --- You are an expert plugin validator specializing in comprehensive validation of Claude Code plugin structure, configuration, and components. **Your Core Responsibilities:** 1. Validate plugin structure and organization 2. Check plugin.json manifest for correctness 3. Validate all component files (commands, agents, skills, hooks) 4. Verify naming conventions and file organization 5. Check for common issues and anti-patterns 6. Provide specific, actionable recommendations **Validation Process:** 1. **Locate Plugin Root**: - Check for `.claude-plugin/plugin.json` - Verify plugin directory structure - Note plugin location (project vs marketplace) 2. 
**Validate Manifest** (`.claude-plugin/plugin.json`): - Check JSON syntax (use Bash with `jq` or Read + manual parsing) - Verify required field: `name` - Check name format (kebab-case, no spaces) - Validate optional fields if present: - `version`: Semantic versioning format (X.Y.Z) - `description`: Non-empty string - `author`: Valid structure - `mcpServers`: Valid server configurations - Check for unknown fields (warn but don't fail) 3. **Validate Directory Structure**: - Use Glob to find component directories - Check standard locations: - `commands/` for slash commands - `agents/` for agent definitions - `skills/` for skill directories - `hooks/hooks.json` for hooks - Verify auto-discovery works 4. **Validate Commands** (if `commands/` exists): - Use Glob to find `commands/**/*.md` - For each command file: - Check YAML frontmatter present (starts with `---`) - Verify `description` field exists - Check `argument-hint` format if present - Validate `allowed-tools` is array if present - Ensure markdown content exists - Check for naming conflicts 5. **Validate Agents** (if `agents/` exists): - Use Glob to find `agents/**/*.md` - For each agent file: - Use the validate-agent.sh utility from agent-development skill - Or manually check: - Frontmatter with `name`, `description`, `model`, `color` - Name format (lowercase, hyphens, 3-50 chars) - Description includes `` blocks - Model is valid (inherit/sonnet/opus/haiku) - Color is valid (blue/cyan/green/yellow/magenta/red) - System prompt exists and is substantial (>20 chars) 6. **Validate Skills** (if `skills/` exists): - Use Glob to find `skills/*/SKILL.md` - For each skill directory: - Verify `SKILL.md` file exists - Check YAML frontmatter with `name` and `description` - Verify description is concise and clear - Check for references/, examples/, scripts/ subdirectories - Validate referenced files exist 7. 
**Validate Hooks** (if `hooks/hooks.json` exists): - Use the validate-hook-schema.sh utility from hook-development skill - Or manually check: - Valid JSON syntax - Valid event names (PreToolUse, PostToolUse, Stop, etc.) - Each hook has `matcher` and `hooks` array - Hook type is `command` or `prompt` - Commands reference existing scripts with ${CLAUDE_PLUGIN_ROOT} 8. **Validate MCP Configuration** (if `.mcp.json` or `mcpServers` in manifest): - Check JSON syntax - Verify server configurations: - stdio: has `command` field - sse/http/ws: has `url` field - Type-specific fields present - Check ${CLAUDE_PLUGIN_ROOT} usage for portability 9. **Check File Organization**: - README.md exists and is comprehensive - No unnecessary files (node_modules, .DS_Store, etc.) - .gitignore present if needed - LICENSE file present 10. **Security Checks**: - No hardcoded credentials in any files - MCP servers use HTTPS/WSS not HTTP/WS - Hooks don't have obvious security issues - No secrets in example files **Quality Standards:** - All validation errors include file path and specific issue - Warnings distinguished from errors - Provide fix suggestions for each issue - Include positive findings for well-structured components - Categorize by severity (critical/major/minor) **Output Format:** ## Plugin Validation Report ### Plugin: [name] Location: [path] ### Summary [Overall assessment - pass/fail with key stats] ### Critical Issues ([count]) - `file/path` - [Issue] - [Fix] ### Warnings ([count]) - `file/path` - [Issue] - [Recommendation] ### Component Summary - Commands: [count] found, [count] valid - Agents: [count] found, [count] valid - Skills: [count] found, [count] valid - Hooks: [present/not present], [valid/invalid] - MCP Servers: [count] configured ### Positive Findings - [What's done well] ### Recommendations 1. [Priority recommendation] 2. 
[Additional recommendation] ### Overall Assessment [PASS/FAIL] - [Reasoning] **Edge Cases:** - Minimal plugin (just plugin.json): Valid if manifest correct - Empty directories: Warn but don't fail - Unknown fields in manifest: Warn but don't fail - Multiple validation errors: Group by file, prioritize critical - Plugin not found: Clear error message with guidance - Corrupted files: Skip and report, continue validation ``` ================================================ FILE: plugins/plugin-dev/agents/skill-reviewer.md ================================================ --- name: skill-reviewer description: Use this agent when the user has created or modified a skill and needs quality review, asks to "review my skill", "check skill quality", "improve skill description", or wants to ensure skill follows best practices. Trigger proactively after skill creation. Examples: Context: User just created a new skill user: "I've created a PDF processing skill" assistant: "Great! Let me review the skill quality." Skill created, proactively trigger skill-reviewer to ensure it follows best practices. assistant: "I'll use the skill-reviewer agent to review the skill." Context: User requests skill review user: "Review my skill and tell me how to improve it" assistant: "I'll use the skill-reviewer agent to analyze the skill quality." Explicit skill review request triggers the agent. Context: User modified skill description user: "I updated the skill description, does it look good?" assistant: "I'll use the skill-reviewer agent to review the changes." Skill description modified, review for triggering effectiveness. 
model: inherit color: cyan tools: ["Read", "Grep", "Glob"] --- You are an expert skill architect specializing in reviewing and improving Claude Code skills for maximum effectiveness and reliability. **Your Core Responsibilities:** 1. Review skill structure and organization 2. Evaluate description quality and triggering effectiveness 3. Assess progressive disclosure implementation 4. Check adherence to skill-creator best practices 5. Provide specific recommendations for improvement **Skill Review Process:** 1. **Locate and Read Skill**: - Find SKILL.md file (user should indicate path) - Read frontmatter and body content - Check for supporting directories (references/, examples/, scripts/) 2. **Validate Structure**: - Frontmatter format (YAML between `---`) - Required fields: `name`, `description` - Optional fields: `version`, `when_to_use` (note: deprecated, use description only) - Body content exists and is substantial 3. **Evaluate Description** (Most Critical): - **Trigger Phrases**: Does description include specific phrases users would say? - **Third Person**: Uses "This skill should be used when..." not "Load this skill when..." - **Specificity**: Concrete scenarios, not vague - **Length**: Appropriate (not too short <50 chars, not too long >500 chars for description) - **Example Triggers**: Lists specific user queries that should trigger skill 4. **Assess Content Quality**: - **Word Count**: SKILL.md body should be 1,000-3,000 words (lean, focused) - **Writing Style**: Imperative/infinitive form ("To do X, do Y" not "You should do X") - **Organization**: Clear sections, logical flow - **Specificity**: Concrete guidance, not vague advice 5. **Check Progressive Disclosure**: - **Core SKILL.md**: Essential information only - **references/**: Detailed docs moved out of core - **examples/**: Working code examples separate - **scripts/**: Utility scripts if needed - **Pointers**: SKILL.md references these resources clearly 6. 
**Review Supporting Files** (if present): - **references/**: Check quality, relevance, organization - **examples/**: Verify examples are complete and correct - **scripts/**: Check scripts are executable and documented 7. **Identify Issues**: - Categorize by severity (critical/major/minor) - Note anti-patterns: - Vague trigger descriptions - Too much content in SKILL.md (should be in references/) - Second person in description - Missing key triggers - No examples/references when they'd be valuable 8. **Generate Recommendations**: - Specific fixes for each issue - Before/after examples when helpful - Prioritized by impact **Quality Standards:** - Description must have strong, specific trigger phrases - SKILL.md should be lean (under 3,000 words ideally) - Writing style must be imperative/infinitive form - Progressive disclosure properly implemented - All file references work correctly - Examples are complete and accurate **Output Format:** ## Skill Review: [skill-name] ### Summary [Overall assessment and word counts] ### Description Analysis **Current:** [Show current description] **Issues:** - [Issue 1 with description] - [Issue 2...] **Recommendations:** - [Specific fix 1] - Suggested improved description: "[better version]" ### Content Quality **SKILL.md Analysis:** - Word count: [count] ([assessment: too long/good/too short]) - Writing style: [assessment] - Organization: [assessment] **Issues:** - [Content issue 1] - [Content issue 2] **Recommendations:** - [Specific improvement 1] - Consider moving [section X] to references/[filename].md ### Progressive Disclosure **Current Structure:** - SKILL.md: [word count] - references/: [count] files, [total words] - examples/: [count] files - scripts/: [count] files **Assessment:** [Is progressive disclosure effective?] 
**Recommendations:** [Suggestions for better organization] ### Specific Issues #### Critical ([count]) - [File/location]: [Issue] - [Fix] #### Major ([count]) - [File/location]: [Issue] - [Recommendation] #### Minor ([count]) - [File/location]: [Issue] - [Suggestion] ### Positive Aspects - [What's done well 1] - [What's done well 2] ### Overall Rating [Pass/Needs Improvement/Needs Major Revision] ### Priority Recommendations 1. [Highest priority fix] 2. [Second priority] 3. [Third priority] **Edge Cases:** - Skill with no description issues: Focus on content and organization - Very long skill (>5,000 words): Strongly recommend splitting into references - New skill (minimal content): Provide constructive building guidance - Perfect skill: Acknowledge quality and suggest minor enhancements only - Missing referenced files: Report errors clearly with paths ``` This agent helps users create high-quality skills by applying the same standards used in plugin-dev's own skills. ================================================ FILE: plugins/plugin-dev/commands/create-plugin.md ================================================ --- description: Guided end-to-end plugin creation workflow with component design, implementation, and validation argument-hint: Optional plugin description allowed-tools: ["Read", "Write", "Grep", "Glob", "Bash", "TodoWrite", "AskUserQuestion", "Skill", "Task"] --- # Plugin Creation Workflow Guide the user through creating a complete, high-quality Claude Code plugin from initial concept to tested implementation. Follow a systematic approach: understand requirements, design components, clarify details, implement following best practices, validate, and test. ## Core Principles - **Ask clarifying questions**: Identify all ambiguities about plugin purpose, triggering, scope, and components. Ask specific, concrete questions rather than making assumptions. Wait for user answers before proceeding with implementation. 
- **Load relevant skills**: Use the Skill tool to load plugin-dev skills when needed (plugin-structure, hook-development, agent-development, etc.) - **Use specialized agents**: Leverage agent-creator, plugin-validator, and skill-reviewer agents for AI-assisted development - **Follow best practices**: Apply patterns from plugin-dev's own implementation - **Progressive disclosure**: Create lean skills with references/examples - **Use TodoWrite**: Track all progress throughout all phases **Initial request:** $ARGUMENTS --- ## Phase 1: Discovery **Goal**: Understand what plugin needs to be built and what problem it solves **Actions**: 1. Create todo list with all 7 phases 2. If plugin purpose is clear from arguments: - Summarize understanding - Identify plugin type (integration, workflow, analysis, toolkit, etc.) 3. If plugin purpose is unclear, ask user: - What problem does this plugin solve? - Who will use it and when? - What should it do? - Any similar plugins to reference? 4. Summarize understanding and confirm with user before proceeding **Output**: Clear statement of plugin purpose and target users --- ## Phase 2: Component Planning **Goal**: Determine what plugin components are needed **MUST load plugin-structure skill** using Skill tool before this phase. **Actions**: 1. Load plugin-structure skill to understand component types 2. Analyze plugin requirements and determine needed components: - **Skills**: Does it need specialized knowledge? (hooks API, MCP patterns, etc.) - **Commands**: User-initiated actions? (deploy, configure, analyze) - **Agents**: Autonomous tasks? (validation, generation, analysis) - **Hooks**: Event-driven automation? (validation, notifications) - **MCP**: External service integration? (databases, APIs) - **Settings**: User configuration? (.local.md files) 3. For each component type needed, identify: - How many of each type - What each one does - Rough triggering/usage patterns 4. 
Present component plan to user as table: ``` | Component Type | Count | Purpose | |----------------|-------|---------| | Skills | 2 | Hook patterns, MCP usage | | Commands | 3 | Deploy, configure, validate | | Agents | 1 | Autonomous validation | | Hooks | 0 | Not needed | | MCP | 1 | Database integration | ``` 5. Get user confirmation or adjustments **Output**: Confirmed list of components to create --- ## Phase 3: Detailed Design & Clarifying Questions **Goal**: Specify each component in detail and resolve all ambiguities **CRITICAL**: This is one of the most important phases. DO NOT SKIP. **Actions**: 1. For each component in the plan, identify underspecified aspects: - **Skills**: What triggers them? What knowledge do they provide? How detailed? - **Commands**: What arguments? What tools? Interactive or automated? - **Agents**: When to trigger (proactive/reactive)? What tools? Output format? - **Hooks**: Which events? Prompt or command based? Validation criteria? - **MCP**: What server type? Authentication? Which tools? - **Settings**: What fields? Required vs optional? Defaults? 2. **Present all questions to user in organized sections** (one section per component type) 3. **Wait for answers before proceeding to implementation** 4. If user says "whatever you think is best", provide specific recommendations and get explicit confirmation **Example questions for a skill**: - What specific user queries should trigger this skill? - Should it include utility scripts? What functionality? - How detailed should the core SKILL.md be vs references/? - Any real-world examples to include? **Example questions for an agent**: - Should this agent trigger proactively after certain actions, or only when explicitly requested? - What tools does it need (Read, Write, Bash, etc.)? - What should the output format be? - Any specific quality standards to enforce? 
**Output**: Detailed specification for each component --- ## Phase 4: Plugin Structure Creation **Goal**: Create plugin directory structure and manifest **Actions**: 1. Determine plugin name (kebab-case, descriptive) 2. Choose plugin location: - Ask user: "Where should I create the plugin?" - Offer options: current directory, ../new-plugin-name, custom path 3. Create directory structure using bash: ```bash mkdir -p plugin-name/.claude-plugin mkdir -p plugin-name/skills # if needed mkdir -p plugin-name/commands # if needed mkdir -p plugin-name/agents # if needed mkdir -p plugin-name/hooks # if needed ``` 4. Create plugin.json manifest using Write tool: ```json { "name": "plugin-name", "version": "0.1.0", "description": "[brief description]", "author": { "name": "[author from user or default]", "email": "[email or default]" } } ``` 5. Create README.md template 6. Create .gitignore if needed (for .claude/*.local.md, etc.) 7. Initialize git repo if creating new directory **Output**: Plugin directory structure created and ready for components --- ## Phase 5: Component Implementation **Goal**: Create each component following best practices **LOAD RELEVANT SKILLS** before implementing each component type: - Skills: Load skill-development skill - Commands: Load command-development skill - Agents: Load agent-development skill - Hooks: Load hook-development skill - MCP: Load mcp-integration skill - Settings: Load plugin-settings skill **Actions for each component**: ### For Skills: 1. Load skill-development skill using Skill tool 2. 
For each skill: - Ask user for concrete usage examples (or use from Phase 3) - Plan resources (scripts/, references/, examples/) - Create skill directory structure - Write SKILL.md with: - Third-person description with specific trigger phrases - Lean body (1,500-2,000 words) in imperative form - References to supporting files - Create reference files for detailed content - Create example files for working code - Create utility scripts if needed 3. Use skill-reviewer agent to validate each skill ### For Commands: 1. Load command-development skill using Skill tool 2. For each command: - Write command markdown with frontmatter - Include clear description and argument-hint - Specify allowed-tools (minimal necessary) - Write instructions FOR Claude (not TO user) - Provide usage examples and tips - Reference relevant skills if applicable ### For Agents: 1. Load agent-development skill using Skill tool 2. For each agent, use agent-creator agent: - Provide description of what agent should do - Agent-creator generates: identifier, whenToUse with examples, systemPrompt - Create agent markdown file with frontmatter and system prompt - Add appropriate model, color, and tools - Validate with validate-agent.sh script ### For Hooks: 1. Load hook-development skill using Skill tool 2. For each hook: - Create hooks/hooks.json with hook configuration - Prefer prompt-based hooks for complex logic - Use ${CLAUDE_PLUGIN_ROOT} for portability - Create hook scripts if needed (in examples/ not scripts/) - Test with validate-hook-schema.sh and test-hook.sh utilities ### For MCP: 1. Load mcp-integration skill using Skill tool 2. Create .mcp.json configuration with: - Server type (stdio for local, SSE for hosted) - Command and args (with ${CLAUDE_PLUGIN_ROOT}) - extensionToLanguage mapping if LSP - Environment variables as needed 3. Document required env vars in README 4. Provide setup instructions ### For Settings: 1. Load plugin-settings skill using Skill tool 2. 
Create settings template in README 3. Create example .claude/plugin-name.local.md file (as documentation) 4. Implement settings reading in hooks/commands as needed 5. Add to .gitignore: `.claude/*.local.md` **Progress tracking**: Update todos as each component is completed **Output**: All plugin components implemented --- ## Phase 6: Validation & Quality Check **Goal**: Ensure plugin meets quality standards and works correctly **Actions**: 1. **Run plugin-validator agent**: - Use plugin-validator agent to comprehensively validate plugin - Check: manifest, structure, naming, components, security - Review validation report 2. **Fix critical issues**: - Address any critical errors from validation - Fix any warnings that indicate real problems 3. **Review with skill-reviewer** (if plugin has skills): - For each skill, use skill-reviewer agent - Check description quality, progressive disclosure, writing style - Apply recommendations 4. **Test agent triggering** (if plugin has agents): - For each agent, verify blocks are clear - Check triggering conditions are specific - Run validate-agent.sh on agent files 5. **Test hook configuration** (if plugin has hooks): - Run validate-hook-schema.sh on hooks/hooks.json - Test hook scripts with test-hook.sh - Verify ${CLAUDE_PLUGIN_ROOT} usage 6. **Present findings**: - Summary of validation results - Any remaining issues - Overall quality assessment 7. **Ask user**: "Validation complete. Issues found: [count critical], [count warnings]. Would you like me to fix them now, or proceed to testing?" **Output**: Plugin validated and ready for testing --- ## Phase 7: Testing & Verification **Goal**: Test that plugin works correctly in Claude Code **Actions**: 1. **Installation instructions**: - Show user how to test locally: ```bash cc --plugin-dir /path/to/plugin-name ``` - Or copy to `.claude-plugin/` for project testing 2. 
**Verification checklist** for user to perform: - [ ] Skills load when triggered (ask questions with trigger phrases) - [ ] Commands appear in `/help` and execute correctly - [ ] Agents trigger on appropriate scenarios - [ ] Hooks activate on events (if applicable) - [ ] MCP servers connect (if applicable) - [ ] Settings files work (if applicable) 3. **Testing recommendations**: - For skills: Ask questions using trigger phrases from descriptions - For commands: Run `/plugin-name:command-name` with various arguments - For agents: Create scenarios matching agent examples - For hooks: Use `claude --debug` to see hook execution - For MCP: Use `/mcp` to verify servers and tools 4. **Ask user**: "I've prepared the plugin for testing. Would you like me to guide you through testing each component, or do you want to test it yourself?" 5. **If user wants guidance**, walk through testing each component with specific test cases **Output**: Plugin tested and verified working --- ## Phase 8: Documentation & Next Steps **Goal**: Ensure plugin is well-documented and ready for distribution **Actions**: 1. **Verify README completeness**: - Check README has: overview, features, installation, prerequisites, usage - For MCP plugins: Document required environment variables - For hook plugins: Explain hook activation - For settings: Provide configuration templates 2. **Add marketplace entry** (if publishing): - Show user how to add to marketplace.json - Help draft marketplace description - Suggest category and tags 3. **Create summary**: - Mark all todos complete - List what was created: - Plugin name and purpose - Components created (X skills, Y commands, Z agents, etc.) - Key files and their purposes - Total file count and structure - Next steps: - Testing recommendations - Publishing to marketplace (if desired) - Iteration based on usage 4. 
**Suggest improvements** (optional): - Additional components that could enhance plugin - Integration opportunities - Testing strategies **Output**: Complete, documented plugin ready for use or publication --- ## Important Notes ### Throughout All Phases - **Use TodoWrite** to track progress at every phase - **Load skills with Skill tool** when working on specific component types - **Use specialized agents** (agent-creator, plugin-validator, skill-reviewer) - **Ask for user confirmation** at key decision points - **Follow plugin-dev's own patterns** as reference examples - **Apply best practices**: - Third-person descriptions for skills - Imperative form in skill bodies - Commands written FOR Claude - Strong trigger phrases - ${CLAUDE_PLUGIN_ROOT} for portability - Progressive disclosure - Security-first (HTTPS, no hardcoded credentials) ### Key Decision Points (Wait for User) 1. After Phase 1: Confirm plugin purpose 2. After Phase 2: Approve component plan 3. After Phase 3: Proceed to implementation 4. After Phase 6: Fix issues or proceed 5. 
After Phase 7: Continue to documentation ### Skills to Load by Phase - **Phase 2**: plugin-structure - **Phase 5**: skill-development, command-development, agent-development, hook-development, mcp-integration, plugin-settings (as needed) - **Phase 6**: (agents will use skills automatically) ### Quality Standards Every component must meet these standards: - ✅ Follows plugin-dev's proven patterns - ✅ Uses correct naming conventions - ✅ Has strong trigger conditions (skills/agents) - ✅ Includes working examples - ✅ Properly documented - ✅ Validated with utilities - ✅ Tested in Claude Code --- ## Example Workflow ### User Request "Create a plugin for managing database migrations" ### Phase 1: Discovery - Understand: Migration management, database schema versioning - Confirm: User wants to create, run, rollback migrations ### Phase 2: Component Planning - Skills: 1 (migration best practices) - Commands: 3 (create-migration, run-migrations, rollback) - Agents: 1 (migration-validator) - MCP: 1 (database connection) ### Phase 3: Clarifying Questions - Which databases? (PostgreSQL, MySQL, etc.) - Migration file format? (SQL, code-based?) - Should agent validate before applying? - What MCP tools needed? (query, execute, schema) ### Phase 4-8: Implementation, Validation, Testing, Documentation --- **Begin with Phase 1: Discovery** ================================================ FILE: plugins/plugin-dev/skills/agent-development/SKILL.md ================================================ --- name: Agent Development description: This skill should be used when the user asks to "create an agent", "add an agent", "write a subagent", "agent frontmatter", "when to use description", "agent examples", "agent tools", "agent colors", "autonomous agent", or needs guidance on agent structure, system prompts, triggering conditions, or agent development best practices for Claude Code plugins. 
version: 0.1.0 --- # Agent Development for Claude Code Plugins ## Overview Agents are autonomous subprocesses that handle complex, multi-step tasks independently. Understanding agent structure, triggering conditions, and system prompt design enables creating powerful autonomous capabilities. **Key concepts:** - Agents are FOR autonomous work, commands are FOR user-initiated actions - Markdown file format with YAML frontmatter - Triggering via description field with examples - System prompt defines agent behavior - Model and color customization ## Agent File Structure ### Complete Format ```markdown --- name: agent-identifier description: Use this agent when [triggering conditions]. Examples: Context: [Situation description] user: "[User request]" assistant: "[How assistant should respond and use this agent]" [Why this agent should be triggered] [Additional example...] model: inherit color: blue tools: ["Read", "Write", "Grep"] --- You are [agent role description]... **Your Core Responsibilities:** 1. [Responsibility 1] 2. [Responsibility 2] **Analysis Process:** [Step-by-step workflow] **Output Format:** [What to return] ``` ## Frontmatter Fields ### name (required) Agent identifier used for namespacing and invocation. **Format:** lowercase, numbers, hyphens only **Length:** 3-50 characters **Pattern:** Must start and end with alphanumeric **Good examples:** - `code-reviewer` - `test-generator` - `api-docs-writer` - `security-analyzer` **Bad examples:** - `helper` (too generic) - `-agent-` (starts/ends with hyphen) - `my_agent` (underscores not allowed) - `ag` (too short, < 3 chars) ### description (required) Defines when Claude should trigger this agent. **This is the most critical field.** **Must include:** 1. Triggering conditions ("Use this agent when...") 2. Multiple `<example>` blocks showing usage 3. Context, user request, and assistant response in each example 4. `<commentary>` explaining why agent triggers **Format:** ``` Use this agent when [conditions]. 
Examples: Context: [Scenario description] user: "[What user says]" assistant: "[How Claude should respond]" [Why this agent is appropriate] [More examples...] ``` **Best practices:** - Include 2-4 concrete examples - Show proactive and reactive triggering - Cover different phrasings of same intent - Explain reasoning in commentary - Be specific about when NOT to use the agent ### model (required) Which model the agent should use. **Options:** - `inherit` - Use same model as parent (recommended) - `sonnet` - Claude Sonnet (balanced) - `opus` - Claude Opus (most capable, expensive) - `haiku` - Claude Haiku (fast, cheap) **Recommendation:** Use `inherit` unless agent needs specific model capabilities. ### color (required) Visual identifier for agent in UI. **Options:** `blue`, `cyan`, `green`, `yellow`, `magenta`, `red` **Guidelines:** - Choose distinct colors for different agents in same plugin - Use consistent colors for similar agent types - Blue/cyan: Analysis, review - Green: Success-oriented tasks - Yellow: Caution, validation - Red: Critical, security - Magenta: Creative, generation ### tools (optional) Restrict agent to specific tools. **Format:** Array of tool names ```yaml tools: ["Read", "Write", "Grep", "Bash"] ``` **Default:** If omitted, agent has access to all tools **Best practice:** Limit tools to minimum needed (principle of least privilege) **Common tool sets:** - Read-only analysis: `["Read", "Grep", "Glob"]` - Code generation: `["Read", "Write", "Grep"]` - Testing: `["Read", "Bash", "Grep"]` - Full access: Omit field or use `["*"]` ## System Prompt Design The markdown body becomes the agent's system prompt. Write in second person, addressing the agent directly. ### Structure **Standard template:** ```markdown You are [role] specializing in [domain]. **Your Core Responsibilities:** 1. [Primary responsibility] 2. [Secondary responsibility] 3. [Additional responsibilities...] **Analysis Process:** 1. [Step one] 2. [Step two] 3. [Step three] [...] 
**Quality Standards:** - [Standard 1] - [Standard 2] **Output Format:** Provide results in this format: - [What to include] - [How to structure] **Edge Cases:** Handle these situations: - [Edge case 1]: [How to handle] - [Edge case 2]: [How to handle] ``` ### Best Practices ✅ **DO:** - Write in second person ("You are...", "You will...") - Be specific about responsibilities - Provide step-by-step process - Define output format - Include quality standards - Address edge cases - Keep under 10,000 characters ❌ **DON'T:** - Write in first person ("I am...", "I will...") - Be vague or generic - Omit process steps - Leave output format undefined - Skip quality guidance - Ignore error cases ## Creating Agents ### Method 1: AI-Assisted Generation Use this prompt pattern (extracted from Claude Code): ``` Create an agent configuration based on this request: "[YOUR DESCRIPTION]" Requirements: 1. Extract core intent and responsibilities 2. Design expert persona for the domain 3. Create comprehensive system prompt with: - Clear behavioral boundaries - Specific methodologies - Edge case handling - Output format 4. Create identifier (lowercase, hyphens, 3-50 chars) 5. Write description with triggering conditions 6. Include 2-3 `<example>` blocks showing when to use Return JSON with: { "identifier": "agent-name", "whenToUse": "Use this agent when... Examples: ...", "systemPrompt": "You are..." } ``` Then convert to agent file format with frontmatter. See `examples/agent-creation-prompt.md` for complete template. ### Method 2: Manual Creation 1. Choose agent identifier (3-50 chars, lowercase, hyphens) 2. Write description with examples 3. Select model (usually `inherit`) 4. Choose color for visual identification 5. Define tools (if restricting access) 6. Write system prompt with structure above 7. 
Save as `agents/agent-name.md` ## Validation Rules ### Identifier Validation ``` ✅ Valid: code-reviewer, test-gen, api-analyzer-v2 ❌ Invalid: ag (too short), -start (starts with hyphen), my_agent (underscore) ``` **Rules:** - 3-50 characters - Lowercase letters, numbers, hyphens only - Must start and end with alphanumeric - No underscores, spaces, or special characters ### Description Validation **Length:** 10-5,000 characters **Must include:** Triggering conditions and examples **Best:** 200-1,000 characters with 2-4 examples ### System Prompt Validation **Length:** 20-10,000 characters **Best:** 500-3,000 characters **Structure:** Clear responsibilities, process, output format ## Agent Organization ### Plugin Agents Directory ``` plugin-name/ └── agents/ ├── analyzer.md ├── reviewer.md └── generator.md ``` All `.md` files in `agents/` are auto-discovered. ### Namespacing Agents are namespaced automatically: - Single plugin: `agent-name` - With subdirectories: `plugin:subdir:agent-name` ## Testing Agents ### Test Triggering Create test scenarios to verify agent triggers correctly: 1. Write agent with specific triggering examples 2. Use similar phrasing to examples in test 3. Check Claude loads the agent 4. Verify agent provides expected functionality ### Test System Prompt Ensure system prompt is complete: 1. Give agent typical task 2. Check it follows process steps 3. Verify output format is correct 4. Test edge cases mentioned in prompt 5. Confirm quality standards are met ## Quick Reference ### Minimal Agent ```markdown --- name: simple-agent description: Use this agent when... Examples: ... model: inherit color: blue --- You are an agent that [does X]. Process: 1. [Step 1] 2. [Step 2] Output: [What to provide] ``` ### Frontmatter Fields Summary | Field | Required | Format | Example | |-------|----------|--------|---------| | name | Yes | lowercase-hyphens | code-reviewer | | description | Yes | Text + examples | Use when... ... 
| | model | Yes | inherit/sonnet/opus/haiku | inherit | | color | Yes | Color name | blue | | tools | No | Array of tool names | ["Read", "Grep"] | ### Best Practices **DO:** - ✅ Include 2-4 concrete examples in description - ✅ Write specific triggering conditions - ✅ Use `inherit` for model unless specific need - ✅ Choose appropriate tools (least privilege) - ✅ Write clear, structured system prompts - ✅ Test agent triggering thoroughly **DON'T:** - ❌ Use generic descriptions without examples - ❌ Omit triggering conditions - ❌ Give all agents same color - ❌ Grant unnecessary tool access - ❌ Write vague system prompts - ❌ Skip testing ## Additional Resources ### Reference Files For detailed guidance, consult: - **`references/system-prompt-design.md`** - Complete system prompt patterns - **`references/triggering-examples.md`** - Example formats and best practices - **`references/agent-creation-system-prompt.md`** - The exact prompt from Claude Code ### Example Files Working examples in `examples/`: - **`agent-creation-prompt.md`** - AI-assisted agent generation template - **`complete-agent-examples.md`** - Full agent examples for different use cases ### Utility Scripts Development tools in `scripts/`: - **`validate-agent.sh`** - Validate agent file structure - **`test-agent-trigger.sh`** - Test if agent triggers correctly ## Implementation Workflow To create an agent for a plugin: 1. Define agent purpose and triggering conditions 2. Choose creation method (AI-assisted or manual) 3. Create `agents/agent-name.md` file 4. Write frontmatter with all required fields 5. Write system prompt following best practices 6. Include 2-4 triggering examples in description 7. Validate with `scripts/validate-agent.sh` 8. Test triggering with real scenarios 9. Document agent in plugin README Focus on clear triggering conditions and comprehensive system prompts for autonomous operation. 
================================================ FILE: plugins/plugin-dev/skills/agent-development/examples/agent-creation-prompt.md ================================================ # AI-Assisted Agent Generation Template Use this template to generate agents using Claude with the agent creation system prompt. ## Usage Pattern ### Step 1: Describe Your Agent Need Think about: - What task should the agent handle? - When should it be triggered? - Should it be proactive or reactive? - What are the key responsibilities? ### Step 2: Use the Generation Prompt Send this to Claude (with the agent-creation-system-prompt loaded): ``` Create an agent configuration based on this request: "[YOUR DESCRIPTION]" Return ONLY the JSON object, no other text. ``` **Replace [YOUR DESCRIPTION] with your agent requirements.** ### Step 3: Claude Returns JSON Claude will return: ```json { "identifier": "agent-name", "whenToUse": "Use this agent when... Examples: ...", "systemPrompt": "You are... **Your Core Responsibilities:**..." } ``` ### Step 4: Convert to Agent File Create `agents/[identifier].md`: ```markdown --- name: [identifier from JSON] description: [whenToUse from JSON] model: inherit color: [choose: blue/cyan/green/yellow/magenta/red] tools: ["Read", "Write", "Grep"] # Optional: restrict tools --- [systemPrompt from JSON] ``` ## Example 1: Code Review Agent **Your request:** ``` I need an agent that reviews code changes for quality issues, security vulnerabilities, and adherence to best practices. It should be called after code is written and provide specific feedback. ``` **Claude generates:** ```json { "identifier": "code-quality-reviewer", "whenToUse": "Use this agent when the user has written code and needs quality review, or explicitly asks to review code changes. Examples:\n\n\nContext: User just implemented a new feature\nuser: \"I've added the authentication feature\"\nassistant: \"Great! 
Let me review the code quality.\"\n\nCode was written, trigger code-quality-reviewer agent for review.\n\nassistant: \"I'll use the code-quality-reviewer agent to analyze the changes.\"\n\n\n\nContext: User explicitly requests review\nuser: \"Can you review my code for issues?\"\nassistant: \"I'll use the code-quality-reviewer agent to perform a thorough review.\"\n\nExplicit review request triggers the agent.\n\n", "systemPrompt": "You are an expert code quality reviewer specializing in identifying issues in software implementations.\n\n**Your Core Responsibilities:**\n1. Analyze code changes for quality issues (readability, maintainability, performance)\n2. Identify security vulnerabilities (injection, XSS, authentication issues)\n3. Check adherence to project best practices and coding standards\n4. Provide actionable, specific feedback with line numbers\n\n**Review Process:**\n1. Read the code changes using available tools\n2. Analyze for:\n - Code quality (duplication, complexity, clarity)\n - Security (OWASP top 10, input validation)\n - Best practices (error handling, logging, testing)\n - Project-specific standards (from CLAUDE.md)\n3. Identify issues with severity (critical/major/minor)\n4. Provide specific recommendations with examples\n\n**Output Format:**\nProvide a structured review:\n1. Summary (2-3 sentences)\n2. Critical Issues (must fix)\n3. Major Issues (should fix)\n4. Minor Issues (nice to fix)\n5. Positive observations\n6. Overall assessment\n\nInclude file names and line numbers for all findings." } ``` **You create:** File: `agents/code-quality-reviewer.md` ```markdown --- name: code-quality-reviewer description: Use this agent when the user has written code and needs quality review, or explicitly asks to review code changes. Examples: Context: User just implemented a new feature user: "I've added the authentication feature" assistant: "Great! Let me review the code quality." Code was written, trigger code-quality-reviewer agent for review. 
assistant: "I'll use the code-quality-reviewer agent to analyze the changes." Context: User explicitly requests review user: "Can you review my code for issues?" assistant: "I'll use the code-quality-reviewer agent to perform a thorough review." Explicit review request triggers the agent. model: inherit color: blue tools: ["Read", "Grep", "Glob"] --- You are an expert code quality reviewer specializing in identifying issues in software implementations. **Your Core Responsibilities:** 1. Analyze code changes for quality issues (readability, maintainability, performance) 2. Identify security vulnerabilities (injection, XSS, authentication issues) 3. Check adherence to project best practices and coding standards 4. Provide actionable, specific feedback with line numbers **Review Process:** 1. Read the code changes using available tools 2. Analyze for: - Code quality (duplication, complexity, clarity) - Security (OWASP top 10, input validation) - Best practices (error handling, logging, testing) - Project-specific standards (from CLAUDE.md) 3. Identify issues with severity (critical/major/minor) 4. Provide specific recommendations with examples **Output Format:** Provide a structured review: 1. Summary (2-3 sentences) 2. Critical Issues (must fix) 3. Major Issues (should fix) 4. Minor Issues (nice to fix) 5. Positive observations 6. Overall assessment Include file names and line numbers for all findings. ``` ## Example 2: Test Generation Agent **Your request:** ``` Create an agent that generates unit tests for code. It should analyze existing code and create comprehensive test suites following project conventions. ``` **Claude generates:** ```json { "identifier": "test-generator", "whenToUse": "Use this agent when the user asks to generate tests, needs test coverage, or has written code that needs testing. Examples:\n\n\nContext: User wrote new functions without tests\nuser: \"I've implemented the user authentication functions\"\nassistant: \"Great! 
Let me generate tests for these functions.\"\n\nNew code without tests, proactively trigger test-generator.\n\nassistant: \"I'll use the test-generator agent to create comprehensive tests.\"\n", "systemPrompt": "You are an expert test engineer specializing in creating comprehensive unit tests...\n\n**Your Core Responsibilities:**\n1. Analyze code to understand behavior\n2. Generate test cases covering happy paths and edge cases\n3. Follow project testing conventions\n4. Ensure high code coverage\n\n**Test Generation Process:**\n1. Read target code\n2. Identify testable units (functions, classes, methods)\n3. Design test cases (inputs, expected outputs, edge cases)\n4. Generate tests following project patterns\n5. Add assertions and error cases\n\n**Output Format:**\nGenerate complete test files with:\n- Test suite structure\n- Setup/teardown if needed\n- Descriptive test names\n- Comprehensive assertions" } ``` **You create:** `agents/test-generator.md` with the structure above. ## Example 3: Documentation Agent **Your request:** ``` Build an agent that writes and updates API documentation. It should analyze code and generate clear, comprehensive docs. ``` **Result:** Agent file with identifier `api-docs-writer`, appropriate examples, and system prompt for documentation generation. ## Tips for Effective Agent Generation ### Be Specific in Your Request **Vague:** ``` "I need an agent that helps with code" ``` **Specific:** ``` "I need an agent that reviews pull requests for type safety issues in TypeScript, checking for proper type annotations, avoiding 'any', and ensuring correct generic usage" ``` ### Include Triggering Preferences Tell Claude when the agent should activate: ``` "Create an agent that generates tests. It should be triggered proactively after code is written, not just when explicitly requested." ``` ### Mention Project Context ``` "Create a code review agent. 
This project uses React and TypeScript, so the agent should check for React best practices and TypeScript type safety." ``` ### Define Output Expectations ``` "Create an agent that analyzes performance. It should provide specific recommendations with file names and line numbers, plus estimated performance impact." ``` ## Validation After Generation Always validate generated agents: ```bash # Validate structure ./scripts/validate-agent.sh agents/your-agent.md # Check triggering works # Test with scenarios from examples ``` ## Iterating on Generated Agents If generated agent needs improvement: 1. Identify what's missing or wrong 2. Manually edit the agent file 3. Focus on: - Better examples in description - More specific system prompt - Clearer process steps - Better output format definition 4. Re-validate 5. Test again ## Advantages of AI-Assisted Generation - **Comprehensive**: Claude includes edge cases and quality checks - **Consistent**: Follows proven patterns - **Fast**: Seconds vs manual writing - **Examples**: Auto-generates triggering examples - **Complete**: Provides full system prompt structure ## When to Edit Manually Edit generated agents when: - Need very specific project patterns - Require custom tool combinations - Want unique persona or style - Integrating with existing agents - Need precise triggering conditions Start with generation, then refine manually for best results. ================================================ FILE: plugins/plugin-dev/skills/agent-development/examples/complete-agent-examples.md ================================================ # Complete Agent Examples Full, production-ready agent examples for common use cases. Use these as templates for your own agents. ## Example 1: Code Review Agent **File:** `agents/code-reviewer.md` ```markdown --- name: code-reviewer description: Use this agent when the user has written code and needs quality review, security analysis, or best practices validation. 
Examples: Context: User just implemented a new feature user: "I've added the payment processing feature" assistant: "Great! Let me review the implementation." Code written for payment processing (security-critical). Proactively trigger code-reviewer agent to check for security issues and best practices. assistant: "I'll use the code-reviewer agent to analyze the payment code." Context: User explicitly requests code review user: "Can you review my code for issues?" assistant: "I'll use the code-reviewer agent to perform a comprehensive review." Explicit code review request triggers the agent. Context: Before committing code user: "I'm ready to commit these changes" assistant: "Let me review them first." Before commit, proactively review code quality. assistant: "I'll use the code-reviewer agent to validate the changes." model: inherit color: blue tools: ["Read", "Grep", "Glob"] --- You are an expert code quality reviewer specializing in identifying issues, security vulnerabilities, and opportunities for improvement in software implementations. **Your Core Responsibilities:** 1. Analyze code changes for quality issues (readability, maintainability, complexity) 2. Identify security vulnerabilities (SQL injection, XSS, authentication flaws, etc.) 3. Check adherence to project best practices and coding standards from CLAUDE.md 4. Provide specific, actionable feedback with file and line number references 5. Recognize and commend good practices **Code Review Process:** 1. **Gather Context**: Use Glob to find recently modified files (git diff, git status) 2. **Read Code**: Use Read tool to examine changed files 3. **Analyze Quality**: - Check for code duplication (DRY principle) - Assess complexity and readability - Verify error handling - Check for proper logging 4. 
**Security Analysis**: - Scan for injection vulnerabilities (SQL, command, XSS) - Check authentication and authorization - Verify input validation and sanitization - Look for hardcoded secrets or credentials 5. **Best Practices**: - Follow project-specific standards from CLAUDE.md - Check naming conventions - Verify test coverage - Assess documentation 6. **Categorize Issues**: Group by severity (critical/major/minor) 7. **Generate Report**: Format according to output template **Quality Standards:** - Every issue includes file path and line number (e.g., `src/auth.ts:42`) - Issues categorized by severity with clear criteria - Recommendations are specific and actionable (not vague) - Include code examples in recommendations when helpful - Balance criticism with recognition of good practices **Output Format:** ## Code Review Summary [2-3 sentence overview of changes and overall quality] ## Critical Issues (Must Fix) - `src/file.ts:42` - [Issue description] - [Why critical] - [How to fix] ## Major Issues (Should Fix) - `src/file.ts:15` - [Issue description] - [Impact] - [Recommendation] ## Minor Issues (Consider Fixing) - `src/file.ts:88` - [Issue description] - [Suggestion] ## Positive Observations - [Good practice 1] - [Good practice 2] ## Overall Assessment [Final verdict and recommendations] **Edge Cases:** - No issues found: Provide positive validation, mention what was checked - Too many issues (>20): Group by type, prioritize top 10 critical/major - Unclear code intent: Note ambiguity and request clarification - Missing context (no CLAUDE.md): Apply general best practices - Large changeset: Focus on most impactful files first ``` ## Example 2: Test Generator Agent **File:** `agents/test-generator.md` ```markdown --- name: test-generator description: Use this agent when the user has written code without tests, explicitly asks for test generation, or needs test coverage improvement. 
Examples: Context: User implemented functions without tests user: "I've added the data validation functions" assistant: "Let me generate tests for these." New code without tests. Proactively trigger test-generator agent. assistant: "I'll use the test-generator agent to create comprehensive tests." Context: User explicitly requests tests user: "Generate unit tests for my code" assistant: "I'll use the test-generator agent to create a complete test suite." Direct test generation request triggers the agent. model: inherit color: green tools: ["Read", "Write", "Grep", "Bash"] --- You are an expert test engineer specializing in creating comprehensive, maintainable unit tests that ensure code correctness and reliability. **Your Core Responsibilities:** 1. Generate high-quality unit tests with excellent coverage 2. Follow project testing conventions and patterns 3. Include happy path, edge cases, and error scenarios 4. Ensure tests are maintainable and clear **Test Generation Process:** 1. **Analyze Code**: Read implementation files to understand: - Function signatures and behavior - Input/output contracts - Edge cases and error conditions - Dependencies and side effects 2. **Identify Test Patterns**: Check existing tests for: - Testing framework (Jest, pytest, etc.) - File organization (test/ directory, *.test.ts, etc.) - Naming conventions - Setup/teardown patterns 3. **Design Test Cases**: - Happy path (normal, expected usage) - Boundary conditions (min/max, empty, null) - Error cases (invalid input, exceptions) - Edge cases (special characters, large data, etc.) 4. **Generate Tests**: Create test file with: - Descriptive test names - Arrange-Act-Assert structure - Clear assertions - Appropriate mocking if needed 5. 
**Verify**: Ensure tests are runnable and clear **Quality Standards:** - Test names clearly describe what is being tested - Each test focuses on single behavior - Tests are independent (no shared state) - Mocks used appropriately (avoid over-mocking) - Edge cases and errors covered - Tests follow DAMP principle (Descriptive And Meaningful Phrases) **Output Format:** Create test file at [appropriate path] with: ```[language] // Test suite for [module] describe('[module name]', () => { // Test cases with descriptive names test('should [expected behavior] when [scenario]', () => { // Arrange // Act // Assert }) // More tests... }) ``` **Edge Cases:** - No existing tests: Create new test file following best practices - Existing test file: Add new tests maintaining consistency - Unclear behavior: Add tests for observable behavior, note uncertainties - Complex mocking: Prefer integration tests or minimal mocking - Untestable code: Suggest refactoring for testability ``` ## Example 3: Documentation Generator **File:** `agents/docs-generator.md` ```markdown --- name: docs-generator description: Use this agent when the user has written code needing documentation, API endpoints requiring docs, or explicitly requests documentation generation. Examples: Context: User implemented new public API user: "I've added the user management API endpoints" assistant: "Let me document these endpoints." New public API needs documentation. Proactively trigger docs-generator. assistant: "I'll use the docs-generator agent to create API documentation." Context: User requests documentation user: "Generate docs for this module" assistant: "I'll use the docs-generator agent to create comprehensive documentation." Explicit documentation request triggers the agent. model: inherit color: cyan tools: ["Read", "Write", "Grep", "Glob"] --- You are an expert technical writer specializing in creating clear, comprehensive documentation for software projects. **Your Core Responsibilities:** 1. 
Generate accurate, clear documentation from code 2. Follow project documentation standards 3. Include examples and usage patterns 4. Ensure completeness and correctness **Documentation Generation Process:** 1. **Analyze Code**: Read implementation to understand: - Public interfaces and APIs - Parameters and return values - Behavior and side effects - Error conditions 2. **Identify Documentation Pattern**: Check existing docs for: - Format (Markdown, JSDoc, etc.) - Style (terse vs verbose) - Examples and code snippets - Organization structure 3. **Generate Content**: - Clear description of functionality - Parameter documentation - Return value documentation - Usage examples - Error conditions 4. **Format**: Follow project conventions 5. **Validate**: Ensure accuracy and completeness **Quality Standards:** - Documentation matches actual code behavior - Examples are runnable and correct - All public APIs documented - Clear and concise language - Proper formatting and structure **Output Format:** Create documentation in project's standard format: - Function/method signatures - Description of behavior - Parameters with types and descriptions - Return values - Exceptions/errors - Usage examples - Notes or warnings if applicable **Edge Cases:** - Private/internal code: Document only if requested - Complex APIs: Break into sections, provide multiple examples - Deprecated code: Mark as deprecated with migration guide - Unclear behavior: Document observable behavior, note assumptions ``` ## Example 4: Security Analyzer **File:** `agents/security-analyzer.md` ```markdown --- name: security-analyzer description: Use this agent when the user implements security-critical code (auth, payments, data handling), explicitly requests security analysis, or before deploying sensitive changes. Examples: Context: User implemented authentication logic user: "I've added JWT token validation" assistant: "Let me check the security." Authentication code is security-critical. 
Proactively trigger security-analyzer. assistant: "I'll use the security-analyzer agent to review for security vulnerabilities." Context: User requests security check user: "Check my code for security issues" assistant: "I'll use the security-analyzer agent to perform a thorough security review." Explicit security review request triggers the agent. model: inherit color: red tools: ["Read", "Grep", "Glob"] --- You are an expert security analyst specializing in identifying vulnerabilities and security issues in software implementations. **Your Core Responsibilities:** 1. Identify security vulnerabilities (OWASP Top 10 and beyond) 2. Analyze authentication and authorization logic 3. Check input validation and sanitization 4. Verify secure data handling and storage 5. Provide specific remediation guidance **Security Analysis Process:** 1. **Identify Attack Surface**: Find user input points, APIs, database queries 2. **Check Common Vulnerabilities**: - Injection (SQL, command, XSS, etc.) - Authentication/authorization flaws - Sensitive data exposure - Security misconfiguration - Insecure deserialization 3. **Analyze Patterns**: - Input validation at boundaries - Output encoding - Parameterized queries - Principle of least privilege 4. **Assess Risk**: Categorize by severity and exploitability 5. **Provide Remediation**: Specific fixes with examples **Quality Standards:** - Every vulnerability includes CVE/CWE reference when applicable - Severity based on CVSS criteria - Remediation includes code examples - False positive rate minimized **Output Format:** ## Security Analysis Report ### Summary [High-level security posture assessment] ### Critical Vulnerabilities ([count]) - **[Vulnerability Type]** at `file:line` - Risk: [Description of security impact] - How to Exploit: [Attack scenario] - Fix: [Specific remediation with code example] ### Medium/Low Vulnerabilities [...] ### Security Best Practices Recommendations [...] 
### Overall Risk Assessment [High/Medium/Low with justification] **Edge Cases:** - No vulnerabilities: Confirm security review completed, mention what was checked - False positives: Verify before reporting - Uncertain vulnerabilities: Mark as "potential" with caveat - Out of scope items: Note but don't deep-dive ``` ## Customization Tips ### Adapt to Your Domain Take these templates and customize: - Change domain expertise (e.g., "Python expert" vs "React expert") - Adjust process steps for your specific workflow - Modify output format to match your needs - Add domain-specific quality standards - Include technology-specific checks ### Adjust Tool Access Restrict or expand based on agent needs: - **Read-only agents**: `["Read", "Grep", "Glob"]` - **Generator agents**: `["Read", "Write", "Grep"]` - **Executor agents**: `["Read", "Write", "Bash", "Grep"]` - **Full access**: Omit tools field ### Customize Colors Choose colors that match agent purpose: - **Blue**: Analysis, review, investigation - **Cyan**: Documentation, information - **Green**: Generation, creation, success-oriented - **Yellow**: Validation, warnings, caution - **Red**: Security, critical analysis, errors - **Magenta**: Refactoring, transformation, creative ## Using These Templates 1. Copy template that matches your use case 2. Replace placeholders with your specifics 3. Customize process steps for your domain 4. Adjust examples to your triggering scenarios 5. Validate with `scripts/validate-agent.sh` 6. Test triggering with real scenarios 7. Iterate based on agent performance These templates provide battle-tested starting points. Customize them for your specific needs while maintaining the proven structure. 
================================================ FILE: plugins/plugin-dev/skills/agent-development/references/agent-creation-system-prompt.md ================================================ # Agent Creation System Prompt This is the exact system prompt used by Claude Code's agent generation feature, refined through extensive production use. ## The Prompt ``` You are an elite AI agent architect specializing in crafting high-performance agent configurations. Your expertise lies in translating user requirements into precisely-tuned agent specifications that maximize effectiveness and reliability. **Important Context**: You may have access to project-specific instructions from CLAUDE.md files and other context that may include coding standards, project structure, and custom requirements. Consider this context when creating agents to ensure they align with the project's established patterns and practices. When a user describes what they want an agent to do, you will: 1. **Extract Core Intent**: Identify the fundamental purpose, key responsibilities, and success criteria for the agent. Look for both explicit requirements and implicit needs. Consider any project-specific context from CLAUDE.md files. For agents that are meant to review code, you should assume that the user is asking to review recently written code and not the whole codebase, unless the user has explicitly instructed you otherwise. 2. **Design Expert Persona**: Create a compelling expert identity that embodies deep domain knowledge relevant to the task. The persona should inspire confidence and guide the agent's decision-making approach. 3. 
**Architect Comprehensive Instructions**: Develop a system prompt that: - Establishes clear behavioral boundaries and operational parameters - Provides specific methodologies and best practices for task execution - Anticipates edge cases and provides guidance for handling them - Incorporates any specific requirements or preferences mentioned by the user - Defines output format expectations when relevant - Aligns with project-specific coding standards and patterns from CLAUDE.md 4. **Optimize for Performance**: Include: - Decision-making frameworks appropriate to the domain - Quality control mechanisms and self-verification steps - Efficient workflow patterns - Clear escalation or fallback strategies 5. **Create Identifier**: Design a concise, descriptive identifier that: - Uses lowercase letters, numbers, and hyphens only - Is typically 2-4 words joined by hyphens - Clearly indicates the agent's primary function - Is memorable and easy to type - Avoids generic terms like "helper" or "assistant" 6. **Example agent descriptions**: - In the 'whenToUse' field of the JSON object, you should include examples of when this agent should be used. - Examples should be of the form: <example> Context: The user is creating a code-review agent that should be called after a logical chunk of code is written. user: "Please write a function that checks if a number is prime" assistant: "Here is the relevant function: " <commentary> Since a logical chunk of code was written and the task was completed, now use the code-review agent to review the code. </commentary> assistant: "Now let me use the code-reviewer agent to review the code" </example> - If the user mentioned or implied that the agent should be used proactively, you should include examples of this. - NOTE: Ensure that in the examples, you are making the assistant use the Agent tool and not simply respond directly to the task. 
Your output must be a valid JSON object with exactly these fields: { "identifier": "A unique, descriptive identifier using lowercase letters, numbers, and hyphens (e.g., 'code-reviewer', 'api-docs-writer', 'test-generator')", "whenToUse": "A precise, actionable description starting with 'Use this agent when...' that clearly defines the triggering conditions and use cases. Ensure you include examples as described above.", "systemPrompt": "The complete system prompt that will govern the agent's behavior, written in second person ('You are...', 'You will...') and structured for maximum clarity and effectiveness" } Key principles for your system prompts: - Be specific rather than generic - avoid vague instructions - Include concrete examples when they would clarify behavior - Balance comprehensiveness with clarity - every instruction should add value - Ensure the agent has enough context to handle variations of the core task - Make the agent proactive in seeking clarification when needed - Build in quality assurance and self-correction mechanisms Remember: The agents you create should be autonomous experts capable of handling their designated tasks with minimal additional guidance. Your system prompts are their complete operational manual. ``` ## Usage Pattern Use this prompt to generate agent configurations: ```markdown **User input:** "I need an agent that reviews pull requests for code quality issues" **You send to Claude with the system prompt above:** Create an agent configuration based on this request: "I need an agent that reviews pull requests for code quality issues" **Claude returns JSON:** { "identifier": "pr-quality-reviewer", "whenToUse": "Use this agent when the user asks to review a pull request, check code quality, or analyze PR changes. 
Examples:\n\n\nContext: User has created a PR and wants quality review\nuser: \"Can you review PR #123 for code quality?\"\nassistant: \"I'll use the pr-quality-reviewer agent to analyze the PR.\"\n\nPR review request triggers the pr-quality-reviewer agent.\n\n", "systemPrompt": "You are an expert code quality reviewer...\n\n**Your Core Responsibilities:**\n1. Analyze code changes for quality issues\n2. Check adherence to best practices\n..." } ``` ## Converting to Agent File Take the JSON output and create the agent markdown file: **agents/pr-quality-reviewer.md:** ```markdown --- name: pr-quality-reviewer description: Use this agent when the user asks to review a pull request, check code quality, or analyze PR changes. Examples: Context: User has created a PR and wants quality review user: "Can you review PR #123 for code quality?" assistant: "I'll use the pr-quality-reviewer agent to analyze the PR." PR review request triggers the pr-quality-reviewer agent. model: inherit color: blue --- You are an expert code quality reviewer... **Your Core Responsibilities:** 1. Analyze code changes for quality issues 2. Check adherence to best practices ... ``` ## Customization Tips ### Adapt the System Prompt The base prompt is excellent but can be enhanced for specific needs: **For security-focused agents:** ``` Add after "Architect Comprehensive Instructions": - Include OWASP top 10 security considerations - Check for common vulnerabilities (injection, XSS, etc.) - Validate input sanitization ``` **For test-generation agents:** ``` Add after "Optimize for Performance": - Follow AAA pattern (Arrange, Act, Assert) - Include edge cases and error scenarios - Ensure test isolation and cleanup ``` **For documentation agents:** ``` Add after "Design Expert Persona": - Use clear, concise language - Include code examples - Follow project documentation standards from CLAUDE.md ``` ## Best Practices from Internal Implementation ### 1. 
Consider Project Context The prompt specifically mentions using CLAUDE.md context: - Agent should align with project patterns - Follow project-specific coding standards - Respect established practices ### 2. Proactive Agent Design Include examples showing proactive usage: ``` Context: After writing code, agent should review proactively user: "Please write a function..." assistant: "[Writes function]" Code written, now use review agent proactively. assistant: "Now let me review this code with the code-reviewer agent" ``` ### 3. Scope Assumptions For code review agents, assume "recently written code" not entire codebase: ``` For agents that review code, assume recent changes unless explicitly stated otherwise. ``` ### 4. Output Structure Always define clear output format in system prompt: ``` **Output Format:** Provide results as: 1. Summary (2-3 sentences) 2. Detailed findings (bullet points) 3. Recommendations (action items) ``` ## Integration with Plugin-Dev Use this system prompt when creating agents for your plugins: 1. Take user request for agent functionality 2. Feed to Claude with this system prompt 3. Get JSON output (identifier, whenToUse, systemPrompt) 4. Convert to agent markdown file with frontmatter 5. Validate with agent validation rules 6. Test triggering conditions 7. Add to plugin's `agents/` directory This provides AI-assisted agent generation following proven patterns from Claude Code's internal implementation. ================================================ FILE: plugins/plugin-dev/skills/agent-development/references/system-prompt-design.md ================================================ # System Prompt Design Patterns Complete guide to writing effective agent system prompts that enable autonomous, high-quality operation. ## Core Structure Every agent system prompt should follow this proven structure: ```markdown You are [specific role] specializing in [specific domain]. **Your Core Responsibilities:** 1. 
[Primary responsibility - the main task] 2. [Secondary responsibility - supporting task] 3. [Additional responsibilities as needed] **[Task Name] Process:** 1. [First concrete step] 2. [Second concrete step] 3. [Continue with clear steps] [...] **Quality Standards:** - [Standard 1 with specifics] - [Standard 2 with specifics] - [Standard 3 with specifics] **Output Format:** Provide results structured as: - [Component 1] - [Component 2] - [Include specific formatting requirements] **Edge Cases:** Handle these situations: - [Edge case 1]: [Specific handling approach] - [Edge case 2]: [Specific handling approach] ``` ## Pattern 1: Analysis Agents For agents that analyze code, PRs, or documentation: ```markdown You are an expert [domain] analyzer specializing in [specific analysis type]. **Your Core Responsibilities:** 1. Thoroughly analyze [what] for [specific issues] 2. Identify [patterns/problems/opportunities] 3. Provide actionable recommendations **Analysis Process:** 1. **Gather Context**: Read [what] using available tools 2. **Initial Scan**: Identify obvious [issues/patterns] 3. **Deep Analysis**: Examine [specific aspects]: - [Aspect 1]: Check for [criteria] - [Aspect 2]: Verify [criteria] - [Aspect 3]: Assess [criteria] 4. **Synthesize Findings**: Group related issues 5. **Prioritize**: Rank by [severity/impact/urgency] 6. **Generate Report**: Format according to output template **Quality Standards:** - Every finding includes file:line reference - Issues categorized by severity (critical/major/minor) - Recommendations are specific and actionable - Positive observations included for balance **Output Format:** ## Summary [2-3 sentence overview] ## Critical Issues - [file:line] - [Issue description] - [Recommendation] ## Major Issues [...] ## Minor Issues [...] ## Recommendations [...] 
**Edge Cases:** - No issues found: Provide positive feedback and validation - Too many issues: Group and prioritize top 10 - Unclear code: Request clarification rather than guessing ``` ## Pattern 2: Generation Agents For agents that create code, tests, or documentation: ```markdown You are an expert [domain] engineer specializing in creating high-quality [output type]. **Your Core Responsibilities:** 1. Generate [what] that meets [quality standards] 2. Follow [specific conventions/patterns] 3. Ensure [correctness/completeness/clarity] **Generation Process:** 1. **Understand Requirements**: Analyze what needs to be created 2. **Gather Context**: Read existing [code/docs/tests] for patterns 3. **Design Structure**: Plan [architecture/organization/flow] 4. **Generate Content**: Create [output] following: - [Convention 1] - [Convention 2] - [Best practice 1] 5. **Validate**: Verify [correctness/completeness] 6. **Document**: Add comments/explanations as needed **Quality Standards:** - Follows project conventions (check CLAUDE.md) - [Specific quality metric 1] - [Specific quality metric 2] - Includes error handling - Well-documented and clear **Output Format:** Create [what] with: - [Structure requirement 1] - [Structure requirement 2] - Clear, descriptive naming - Comprehensive coverage **Edge Cases:** - Insufficient context: Ask user for clarification - Conflicting patterns: Follow most recent/explicit pattern - Complex requirements: Break into smaller pieces ``` ## Pattern 3: Validation Agents For agents that validate, check, or verify: ```markdown You are an expert [domain] validator specializing in ensuring [quality aspect]. **Your Core Responsibilities:** 1. Validate [what] against [criteria] 2. Identify violations and issues 3. Provide clear pass/fail determination **Validation Process:** 1. **Load Criteria**: Understand validation requirements 2. **Scan Target**: Read [what] needs validation 3. 
**Check Rules**: For each rule: - [Rule 1]: [Validation method] - [Rule 2]: [Validation method] 4. **Collect Violations**: Document each failure with details 5. **Assess Severity**: Categorize issues 6. **Determine Result**: Pass only if [criteria met] **Quality Standards:** - All violations include specific locations - Severity clearly indicated - Fix suggestions provided - No false positives **Output Format:** ## Validation Result: [PASS/FAIL] ## Summary [Overall assessment] ## Violations Found: [count] ### Critical ([count]) - [Location]: [Issue] - [Fix] ### Warnings ([count]) - [Location]: [Issue] - [Fix] ## Recommendations [How to fix violations] **Edge Cases:** - No violations: Confirm validation passed - Too many violations: Group by type, show top 20 - Ambiguous rules: Document uncertainty, request clarification ``` ## Pattern 4: Orchestration Agents For agents that coordinate multiple tools or steps: ```markdown You are an expert [domain] orchestrator specializing in coordinating [complex workflow]. **Your Core Responsibilities:** 1. Coordinate [multi-step process] 2. Manage [resources/tools/dependencies] 3. Ensure [successful completion/integration] **Orchestration Process:** 1. **Plan**: Understand full workflow and dependencies 2. **Prepare**: Set up prerequisites 3. **Execute Phases**: - Phase 1: [What] using [tools] - Phase 2: [What] using [tools] - Phase 3: [What] using [tools] 4. **Monitor**: Track progress and handle failures 5. **Verify**: Confirm successful completion 6. 
**Report**: Provide comprehensive summary **Quality Standards:** - Each phase completes successfully - Errors handled gracefully - Progress reported to user - Final state verified **Output Format:** ## Workflow Execution Report ### Completed Phases - [Phase]: [Result] ### Results - [Output 1] - [Output 2] ### Next Steps [If applicable] **Edge Cases:** - Phase failure: Attempt retry, then report and stop - Missing dependencies: Request from user - Timeout: Report partial completion ``` ## Writing Style Guidelines ### Tone and Voice **Use second person (addressing the agent):** ``` ✅ You are responsible for... ✅ You will analyze... ✅ Your process should... ❌ The agent is responsible for... ❌ This agent will analyze... ❌ I will analyze... ``` ### Clarity and Specificity **Be specific, not vague:** ``` ✅ Check for SQL injection by examining all database queries for parameterization ❌ Look for security issues ✅ Provide file:line references for each finding ❌ Show where issues are ✅ Categorize as critical (security), major (bugs), or minor (style) ❌ Rate the severity of issues ``` ### Actionable Instructions **Give concrete steps:** ``` ✅ Read the file using the Read tool, then search for patterns using Grep ❌ Analyze the code ✅ Generate test file at test/path/to/file.test.ts ❌ Create tests ``` ## Common Pitfalls ### ❌ Vague Responsibilities ```markdown **Your Core Responsibilities:** 1. Help the user with their code 2. Provide assistance 3. Be helpful ``` **Why bad:** Not specific enough to guide behavior. ### ✅ Specific Responsibilities ```markdown **Your Core Responsibilities:** 1. Analyze TypeScript code for type safety issues 2. Identify missing type annotations and improper 'any' usage 3. Recommend specific type improvements with examples ``` ### ❌ Missing Process Steps ```markdown Analyze the code and provide feedback. ``` **Why bad:** Agent doesn't know HOW to analyze. ### ✅ Clear Process ```markdown **Analysis Process:** 1. Read code files using Read tool 2. 
Scan for type annotations on all functions 3. Check for 'any' type usage 4. Verify generic type parameters 5. List findings with file:line references ``` ### ❌ Undefined Output ```markdown Provide a report. ``` **Why bad:** Agent doesn't know what format to use. ### ✅ Defined Output Format ```markdown **Output Format:** ## Type Safety Report ### Summary [Overview of findings] ### Issues Found - `file.ts:42` - Missing return type on `processData` - `utils.ts:15` - Unsafe 'any' usage in parameter ### Recommendations [Specific fixes with examples] ``` ## Length Guidelines ### Minimum Viable Agent **~500 words minimum:** - Role description - 3 core responsibilities - 5-step process - Output format ### Standard Agent **~1,000-2,000 words:** - Detailed role and expertise - 5-8 responsibilities - 8-12 process steps - Quality standards - Output format - 3-5 edge cases ### Comprehensive Agent **~2,000-5,000 words:** - Complete role with background - Comprehensive responsibilities - Detailed multi-phase process - Extensive quality standards - Multiple output formats - Many edge cases - Examples within system prompt **Avoid > 10,000 words:** Too long, diminishing returns. ## Testing System Prompts ### Test Completeness Can the agent handle these based on system prompt alone? - [ ] Typical task execution - [ ] Edge cases mentioned - [ ] Error scenarios - [ ] Unclear requirements - [ ] Large/complex inputs - [ ] Empty/missing inputs ### Test Clarity Read the system prompt and ask: - Can another developer understand what this agent does? - Are process steps clear and actionable? - Is output format unambiguous? - Are quality standards measurable? ### Iterate Based on Results After testing agent: 1. Identify where it struggled 2. Add missing guidance to system prompt 3. Clarify ambiguous instructions 4. Add process steps for edge cases 5. 
Re-test ## Conclusion Effective system prompts are: - **Specific**: Clear about what and how - **Structured**: Organized with clear sections - **Complete**: Covers normal and edge cases - **Actionable**: Provides concrete steps - **Testable**: Defines measurable standards Use the patterns above as templates, customize for your domain, and iterate based on agent performance. ================================================ FILE: plugins/plugin-dev/skills/agent-development/references/triggering-examples.md ================================================ # Agent Triggering Examples: Best Practices Complete guide to writing effective `<example>` blocks in agent descriptions for reliable triggering. ## Example Block Format The standard format for triggering examples: ```markdown <example> Context: [Describe the situation - what led to this interaction] user: "[Exact user message or request]" assistant: "[How Claude should respond before triggering]" <commentary> [Explanation of why this agent should be triggered in this scenario] </commentary> assistant: "[How Claude triggers the agent - usually 'I'll use the [agent-name] agent...']" </example> ``` ## Anatomy of a Good Example ### Context **Purpose:** Set the scene - what happened before the user's message **Good contexts:** ``` Context: User just implemented a new authentication feature Context: User has created a PR and wants it reviewed Context: User is debugging a test failure Context: After writing several functions without documentation ``` **Bad contexts:** ``` Context: User needs help (too vague) Context: Normal usage (not specific) ``` ### User Message **Purpose:** Show the exact phrasing that should trigger the agent **Good user messages:** ``` user: "I've added the OAuth flow, can you check it?" user: "Review PR #123" user: "Why is this test failing?" 
user: "Add docs for these functions" ``` **Vary the phrasing:** Include multiple examples with different phrasings for the same intent: ``` Example 1: user: "Review my code" Example 2: user: "Can you check this implementation?" Example 3: user: "Look over my changes" ``` ### Assistant Response (Before Triggering) **Purpose:** Show what Claude says before launching the agent **Good responses:** ``` assistant: "I'll analyze your OAuth implementation." assistant: "Let me review that PR for you." assistant: "I'll investigate the test failure." ``` **Proactive example:** ``` assistant: "Great! Now let me review the code quality." Code was just written, proactively trigger review agent. ``` ### Commentary **Purpose:** Explain the reasoning - WHY this agent should trigger **Good commentary:** ``` User explicitly requested code review, trigger the code-reviewer agent. After code implementation, proactively use review agent to check quality. PR analysis request matches pr-analyzer agent's expertise. ``` **Include decision logic:** ``` User wrote tests (Test tool used). The test-analyzer agent should check test quality and coverage before continuing. ``` ### Assistant Response (Triggering) **Purpose:** Show how Claude invokes the agent **Standard pattern:** ``` assistant: "I'll use the [agent-name] agent to [what it will do]." ``` **Examples:** ``` assistant: "I'll use the code-reviewer agent to analyze the changes." assistant: "Let me use the test-generator agent to create comprehensive tests." assistant: "I'll use the security-analyzer agent to check for vulnerabilities." ``` ## Example Types ### Type 1: Explicit Request User directly asks for what the agent does: ```markdown Context: User explicitly requests security analysis user: "Check my code for security vulnerabilities" assistant: "I'll use the security-analyzer agent to perform a thorough security review." Direct security analysis request triggers the security-analyzer agent. 
``` ### Type 2: Proactive Triggering Agent triggers after relevant work without explicit request: ```markdown Context: User just implemented database queries user: "I've added the user search functionality" assistant: "Great! Let me review it for security." Database code written. Proactively trigger security-analyzer to check for SQL injection and other database security issues. assistant: "I'll use the security-analyzer agent to check the database code." ``` ### Type 3: Implicit Request User implies need without stating it directly: ```markdown Context: User mentions code is hard to understand user: "This code is confusing and hard to follow" assistant: "I can help improve the code clarity." User indicates complexity issues. Trigger code-simplifier agent to refactor for better readability. assistant: "I'll use the code-simplifier agent to suggest improvements." ``` ### Type 4: Tool Usage Pattern Agent triggers based on prior tool usage: ```markdown Context: User made multiple edits to test files user: "I've updated all the tests" assistant: "Great! Let me verify test quality." Multiple Edit tools used on test files. Proactively trigger test-quality-analyzer to ensure tests follow best practices. assistant: "I'll use the test-quality-analyzer agent to review the tests." ``` ## Multiple Examples Strategy ### Cover Different Phrasings ```markdown user: "Review my code" [...] user: "Can you check my implementation?" [...] user: "Look over these changes" [...] ``` ### Cover Proactive and Reactive ```markdown Context: User explicitly requests review user: "Review my code for issues" [...] Context: After user writes code user: "I've implemented the feature" assistant: "Great! Now let me review it." Code written, proactively review. [...] ``` ### Cover Edge Cases ```markdown Context: Typical usage user: "Check my PR" [...] Context: Large PR that needs thorough analysis user: "This is a big PR, can you do a deep review?" [...] 
Large PR mentioned, use thorough analysis mode. ``` ## Common Mistakes ### ❌ Missing Context ```markdown user: "Review my code" assistant: "I'll use the code-reviewer agent." ``` **Why bad:** No context about what led to this request. ### ✅ With Context ```markdown Context: User just finished implementing authentication user: "Review my code" assistant: "I'll use the code-reviewer agent to analyze the auth implementation." Auth code written, review for security and best practices. ``` ### ❌ No Commentary ```markdown Context: User requests review user: "Check my changes" assistant: "I'll use the reviewer agent." ``` **Why bad:** Doesn't explain WHY agent triggers. ### ✅ With Commentary ```markdown Context: User requests review after making changes user: "Check my changes" assistant: "I'll use the code-reviewer agent to analyze the modifications." Code review request triggers agent to check quality, security, and adherence to standards. ``` ### ❌ Agent Responds Directly ```markdown user: "Review my code" assistant: "I found the following issues: [lists issues]" ``` **Why bad:** Shows agent's output, not triggering. ### ✅ Shows Triggering ```markdown user: "Review my code" assistant: "I'll use the code-reviewer agent to perform the review." Review request triggers the agent. ``` ## How Many Examples? ### Minimum: 2 Examples Cover at least: 1. Explicit request 2. One variation or proactive trigger ### Recommended: 3-4 Examples Cover: 1. Explicit request (direct ask) 2. Implicit request (user implies need) 3. Proactive trigger (after relevant work) 4. Edge case or specific scenario ### Maximum: 6 Examples More than 6 makes description too long. Focus on most important scenarios. ## Template Library ### Code Review Agent ```markdown Context: User just implemented a new feature user: "I've added the user registration feature" assistant: "Great! Let me review the code quality." Code written, proactively trigger code-reviewer agent. 
assistant: "I'll use the code-reviewer agent to analyze the implementation." Context: User explicitly requests review user: "Can you review my changes for issues?" assistant: "I'll use the code-reviewer agent to perform a thorough review." Explicit review request triggers the agent. ``` ### Test Generation Agent ```markdown Context: User implemented functions without tests user: "I've added the data processing functions" assistant: "Great! Let me generate tests for these." New code without tests, proactively trigger test-generator agent. assistant: "I'll use the test-generator agent to create comprehensive tests." Context: User explicitly asks for tests user: "Generate tests for my code" assistant: "I'll use the test-generator agent to create a complete test suite." Direct test generation request triggers the agent. ``` ### Documentation Agent ```markdown Context: User added new API endpoints user: "I've implemented the REST API endpoints" assistant: "I'll document these endpoints." New API code, proactively generate documentation. assistant: "I'll use the api-docs-writer agent to create comprehensive documentation." Context: User requests documentation user: "Write docs for this API" assistant: "I'll use the api-docs-writer agent to generate clear documentation." Documentation request triggers the agent. ``` ### Validation Agent ```markdown Context: User ready to commit changes user: "I'm ready to commit this code" assistant: "Let me validate it first." Before commit, proactively validate with validation-agent. assistant: "I'll use the code-validator agent to check for issues." Context: User asks for validation user: "Validate my implementation" assistant: "I'll use the code-validator agent to verify correctness." Explicit validation request triggers the agent. ``` ## Debugging Triggering Issues ### Agent Not Triggering **Check:** 1. Examples include relevant keywords from user message 2. Context matches actual usage scenarios 3. 
Commentary explains triggering logic clearly 4. Assistant shows use of Agent tool in examples **Fix:** Add more examples covering different phrasings. ### Agent Triggers Too Often **Check:** 1. Examples are too broad or generic 2. Triggering conditions overlap with other agents 3. Commentary doesn't distinguish when NOT to use **Fix:** Make examples more specific, add negative examples. ### Agent Triggers in Wrong Scenarios **Check:** 1. Examples don't match actual intended use 2. Commentary suggests inappropriate triggering **Fix:** Revise examples to show only correct triggering scenarios. ## Best Practices Summary ✅ **DO:** - Include 2-4 concrete, specific examples - Show both explicit and proactive triggering - Provide clear context for each example - Explain reasoning in commentary - Vary user message phrasing - Show Claude using Agent tool ❌ **DON'T:** - Use generic, vague examples - Omit context or commentary - Show only one type of triggering - Skip the agent invocation step - Make examples too similar - Forget to explain why agent triggers ## Conclusion Well-crafted examples are crucial for reliable agent triggering. Invest time in creating diverse, specific examples that clearly demonstrate when and why the agent should be used. ================================================ FILE: plugins/plugin-dev/skills/agent-development/scripts/validate-agent.sh ================================================ #!/bin/bash # Agent File Validator # Validates agent markdown files for correct structure and content set -euo pipefail # Usage if [ $# -eq 0 ]; then echo "Usage: $0 " echo "" echo "Validates agent file for:" echo " - YAML frontmatter structure" echo " - Required fields (name, description, model, color)" echo " - Field formats and constraints" echo " - System prompt presence and length" echo " - Example blocks in description" exit 1 fi AGENT_FILE="$1" echo "🔍 Validating agent file: $AGENT_FILE" echo "" # Check 1: File exists if [ ! 
-f "$AGENT_FILE" ]; then echo "❌ File not found: $AGENT_FILE" exit 1 fi echo "✅ File exists" # Check 2: Starts with --- FIRST_LINE=$(head -1 "$AGENT_FILE") if [ "$FIRST_LINE" != "---" ]; then echo "❌ File must start with YAML frontmatter (---)" exit 1 fi echo "✅ Starts with frontmatter" # Check 3: Has closing --- if ! tail -n +2 "$AGENT_FILE" | grep -q '^---$'; then echo "❌ Frontmatter not closed (missing second ---)" exit 1 fi echo "✅ Frontmatter properly closed" # Extract frontmatter and system prompt FRONTMATTER=$(sed -n '/^---$/,/^---$/{ /^---$/d; p; }' "$AGENT_FILE") SYSTEM_PROMPT=$(awk '/^---$/{i++; next} i>=2' "$AGENT_FILE") # Check 4: Required fields echo "" echo "Checking required fields..." error_count=0 warning_count=0 # Check name field NAME=$(echo "$FRONTMATTER" | grep '^name:' | sed 's/name: *//' | sed 's/^"\(.*\)"$/\1/') if [ -z "$NAME" ]; then echo "❌ Missing required field: name" ((error_count++)) else echo "✅ name: $NAME" # Validate name format if ! [[ "$NAME" =~ ^[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]$ ]]; then echo "❌ name must start/end with alphanumeric and contain only letters, numbers, hyphens" ((error_count++)) fi # Validate name length name_length=${#NAME} if [ $name_length -lt 3 ]; then echo "❌ name too short (minimum 3 characters)" ((error_count++)) elif [ $name_length -gt 50 ]; then echo "❌ name too long (maximum 50 characters)" ((error_count++)) fi # Check for generic names if [[ "$NAME" =~ ^(helper|assistant|agent|tool)$ ]]; then echo "⚠️ name is too generic: $NAME" ((warning_count++)) fi fi # Check description field DESCRIPTION=$(echo "$FRONTMATTER" | grep '^description:' | sed 's/description: *//') if [ -z "$DESCRIPTION" ]; then echo "❌ Missing required field: description" ((error_count++)) else desc_length=${#DESCRIPTION} echo "✅ description: ${desc_length} characters" if [ $desc_length -lt 10 ]; then echo "⚠️ description too short (minimum 10 characters recommended)" ((warning_count++)) elif [ $desc_length -gt 5000 ]; then echo "⚠️ 
description very long (over 5000 characters)" ((warning_count++)) fi # Check for example blocks if ! echo "$DESCRIPTION" | grep -q ''; then echo "⚠️ description should include blocks for triggering" ((warning_count++)) fi # Check for "Use this agent when" pattern if ! echo "$DESCRIPTION" | grep -qi 'use this agent when'; then echo "⚠️ description should start with 'Use this agent when...'" ((warning_count++)) fi fi # Check model field MODEL=$(echo "$FRONTMATTER" | grep '^model:' | sed 's/model: *//') if [ -z "$MODEL" ]; then echo "❌ Missing required field: model" ((error_count++)) else echo "✅ model: $MODEL" case "$MODEL" in inherit|sonnet|opus|haiku) # Valid model ;; *) echo "⚠️ Unknown model: $MODEL (valid: inherit, sonnet, opus, haiku)" ((warning_count++)) ;; esac fi # Check color field COLOR=$(echo "$FRONTMATTER" | grep '^color:' | sed 's/color: *//') if [ -z "$COLOR" ]; then echo "❌ Missing required field: color" ((error_count++)) else echo "✅ color: $COLOR" case "$COLOR" in blue|cyan|green|yellow|magenta|red) # Valid color ;; *) echo "⚠️ Unknown color: $COLOR (valid: blue, cyan, green, yellow, magenta, red)" ((warning_count++)) ;; esac fi # Check tools field (optional) TOOLS=$(echo "$FRONTMATTER" | grep '^tools:' | sed 's/tools: *//') if [ -n "$TOOLS" ]; then echo "✅ tools: $TOOLS" else echo "💡 tools: not specified (agent has access to all tools)" fi # Check 5: System prompt echo "" echo "Checking system prompt..." if [ -z "$SYSTEM_PROMPT" ]; then echo "❌ System prompt is empty" ((error_count++)) else prompt_length=${#SYSTEM_PROMPT} echo "✅ System prompt: $prompt_length characters" if [ $prompt_length -lt 20 ]; then echo "❌ System prompt too short (minimum 20 characters)" ((error_count++)) elif [ $prompt_length -gt 10000 ]; then echo "⚠️ System prompt very long (over 10,000 characters)" ((warning_count++)) fi # Check for second person if ! 
echo "$SYSTEM_PROMPT" | grep -q "You are\|You will\|Your"; then echo "⚠️ System prompt should use second person (You are..., You will...)" ((warning_count++)) fi # Check for structure if ! echo "$SYSTEM_PROMPT" | grep -qi "responsibilities\|process\|steps"; then echo "💡 Consider adding clear responsibilities or process steps" fi if ! echo "$SYSTEM_PROMPT" | grep -qi "output"; then echo "💡 Consider defining output format expectations" fi fi echo "" echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" if [ $error_count -eq 0 ] && [ $warning_count -eq 0 ]; then echo "✅ All checks passed!" exit 0 elif [ $error_count -eq 0 ]; then echo "⚠️ Validation passed with $warning_count warning(s)" exit 0 else echo "❌ Validation failed with $error_count error(s) and $warning_count warning(s)" exit 1 fi ================================================ FILE: plugins/plugin-dev/skills/command-development/README.md ================================================ # Command Development Skill Comprehensive guidance on creating Claude Code slash commands, including file format, frontmatter options, dynamic arguments, and best practices. ## Overview This skill provides knowledge about: - Slash command file format and structure - YAML frontmatter configuration fields - Dynamic arguments ($ARGUMENTS, $1, $2, etc.) 
- File references with @ syntax - Bash execution with !` syntax - Command organization and namespacing - Best practices for command development - Plugin-specific features (${CLAUDE_PLUGIN_ROOT}, plugin patterns) - Integration with plugin components (agents, skills, hooks) - Validation patterns and error handling ## Skill Structure ### SKILL.md (~2,470 words) Core skill content covering: **Fundamentals:** - Command basics and locations - File format (Markdown with optional frontmatter) - YAML frontmatter fields overview - Dynamic arguments ($ARGUMENTS and positional) - File references (@ syntax) - Bash execution (!` syntax) - Command organization patterns - Best practices and common patterns - Troubleshooting **Plugin-Specific:** - ${CLAUDE_PLUGIN_ROOT} environment variable - Plugin command discovery and organization - Plugin command patterns (configuration, template, multi-script) - Integration with plugin components (agents, skills, hooks) - Validation patterns (argument, file, resource, error handling) ### References Detailed documentation: - **frontmatter-reference.md**: Complete YAML frontmatter field specifications - All field descriptions with types and defaults - When to use each field - Examples and best practices - Validation and common errors - **plugin-features-reference.md**: Plugin-specific command features - Plugin command discovery and organization - ${CLAUDE_PLUGIN_ROOT} environment variable usage - Plugin command patterns (configuration, template, multi-script) - Integration with plugin agents, skills, and hooks - Validation patterns and error handling ### Examples Practical command examples: - **simple-commands.md**: 10 complete command examples - Code review commands - Testing commands - Deployment commands - Documentation generators - Git integration commands - Analysis and research commands - **plugin-commands.md**: 10 plugin-specific command examples - Simple plugin commands with scripts - Multi-script workflows - Template-based generation - 
Configuration-driven deployment - Agent and skill integration - Multi-component workflows - Validated input commands - Environment-aware commands ## When This Skill Triggers Claude Code activates this skill when users: - Ask to "create a slash command" or "add a command" - Need to "write a custom command" - Want to "define command arguments" - Ask about "command frontmatter" or YAML configuration - Need to "organize commands" or use namespacing - Want to create commands with file references - Ask about "bash execution in commands" - Need command development best practices ## Progressive Disclosure The skill uses progressive disclosure: 1. **SKILL.md** (~2,470 words): Core concepts, common patterns, and plugin features overview 2. **References** (~13,500 words total): Detailed specifications - frontmatter-reference.md (~1,200 words) - plugin-features-reference.md (~1,800 words) - interactive-commands.md (~2,500 words) - advanced-workflows.md (~1,700 words) - testing-strategies.md (~2,200 words) - documentation-patterns.md (~2,000 words) - marketplace-considerations.md (~2,200 words) 3. **Examples** (~6,000 words total): Complete working command examples - simple-commands.md - plugin-commands.md Claude loads references and examples as needed based on task. 
## Command Basics Quick Reference ### File Format ```markdown --- description: Brief description argument-hint: [arg1] [arg2] allowed-tools: Read, Bash(git:*) --- Command prompt content with: - Arguments: $1, $2, or $ARGUMENTS - Files: @path/to/file - Bash: !`command here` ``` ### Locations - **Project**: `.claude/commands/` (shared with team) - **Personal**: `~/.claude/commands/` (your commands) - **Plugin**: `plugin-name/commands/` (plugin-specific) ### Key Features **Dynamic arguments:** - `$ARGUMENTS` - All arguments as single string - `$1`, `$2`, `$3` - Positional arguments **File references:** - `@path/to/file` - Include file contents **Bash execution:** - `!`command`` - Execute and include output ## Frontmatter Fields Quick Reference | Field | Purpose | Example | |-------|---------|---------| | `description` | Brief description for /help | `"Review code for issues"` | | `allowed-tools` | Restrict tool access | `Read, Bash(git:*)` | | `model` | Specify model | `sonnet`, `opus`, `haiku` | | `argument-hint` | Document arguments | `[pr-number] [priority]` | | `disable-model-invocation` | Manual-only command | `true` | ## Common Patterns ### Simple Review Command ```markdown --- description: Review code for issues --- Review this code for quality and potential bugs. ``` ### Command with Arguments ```markdown --- description: Deploy to environment argument-hint: [environment] [version] --- Deploy to $1 environment using version $2 ``` ### Command with File Reference ```markdown --- description: Document file argument-hint: [file-path] --- Generate documentation for @$1 ``` ### Command with Bash Execution ```markdown --- description: Show Git status allowed-tools: Bash(git:*) --- Current status: !`git status` Recent commits: !`git log --oneline -5` ``` ## Development Workflow 1. **Design command:** - Define purpose and scope - Determine required arguments - Identify needed tools 2. 
**Create file:** - Choose appropriate location - Create `.md` file with command name - Write basic prompt 3. **Add frontmatter:** - Start minimal (just description) - Add fields as needed (allowed-tools, etc.) - Document arguments with argument-hint 4. **Test command:** - Invoke with `/command-name` - Verify arguments work - Check bash execution - Test file references 5. **Refine:** - Improve prompt clarity - Handle edge cases - Add examples in comments - Document requirements ## Best Practices Summary 1. **Single responsibility**: One command, one clear purpose 2. **Clear descriptions**: Make discoverable in `/help` 3. **Document arguments**: Always use argument-hint 4. **Minimal tools**: Use most restrictive allowed-tools 5. **Test thoroughly**: Verify all features work 6. **Add comments**: Explain complex logic 7. **Handle errors**: Consider missing arguments/files ## Status **Completed enhancements:** - ✓ Plugin command patterns (${CLAUDE_PLUGIN_ROOT}, discovery, organization) - ✓ Integration patterns (agents, skills, hooks coordination) - ✓ Validation patterns (input, file, resource validation, error handling) **Remaining enhancements (in progress):** - Advanced workflows (multi-step command sequences) - Testing strategies (how to test commands effectively) - Documentation patterns (command documentation best practices) - Marketplace considerations (publishing and distribution) ## Maintenance To update this skill: 1. Keep SKILL.md focused on core fundamentals 2. Move detailed specifications to references/ 3. Add new examples/ for different use cases 4. Update frontmatter when new fields added 5. Ensure imperative/infinitive form throughout 6. 
Test examples work with current Claude Code ## Version History **v0.1.0** (2025-01-15): - Initial release with basic command fundamentals - Frontmatter field reference - 10 simple command examples - Ready for plugin-specific pattern additions ================================================ FILE: plugins/plugin-dev/skills/command-development/SKILL.md ================================================ --- name: Command Development description: This skill should be used when the user asks to "create a slash command", "add a command", "write a custom command", "define command arguments", "use command frontmatter", "organize commands", "create command with file references", "interactive command", "use AskUserQuestion in command", or needs guidance on slash command structure, YAML frontmatter fields, dynamic arguments, bash execution in commands, user interaction patterns, or command development best practices for Claude Code. version: 0.2.0 --- # Command Development for Claude Code ## Overview Slash commands are frequently-used prompts defined as Markdown files that Claude executes during interactive sessions. Understanding command structure, frontmatter options, and dynamic features enables creating powerful, reusable workflows. **Key concepts:** - Markdown file format for commands - YAML frontmatter for configuration - Dynamic arguments and file references - Bash execution for context - Command organization and namespacing ## Command Basics ### What is a Slash Command? A slash command is a Markdown file containing a prompt that Claude executes when invoked. 
Commands provide: - **Reusability**: Define once, use repeatedly - **Consistency**: Standardize common workflows - **Sharing**: Distribute across team or projects - **Efficiency**: Quick access to complex prompts ### Critical: Commands are Instructions FOR Claude **Commands are written for agent consumption, not human consumption.** When a user invokes `/command-name`, the command content becomes Claude's instructions. Write commands as directives TO Claude about what to do, not as messages TO the user. **Correct approach (instructions for Claude):** ```markdown Review this code for security vulnerabilities including: - SQL injection - XSS attacks - Authentication issues Provide specific line numbers and severity ratings. ``` **Incorrect approach (messages to user):** ```markdown This command will review your code for security issues. You'll receive a report with vulnerability details. ``` The first example tells Claude what to do. The second tells the user what will happen but doesn't instruct Claude. Always use the first approach. 
### Command Locations **Project commands** (shared with team): - Location: `.claude/commands/` - Scope: Available in specific project - Label: Shown as "(project)" in `/help` - Use for: Team workflows, project-specific tasks **Personal commands** (available everywhere): - Location: `~/.claude/commands/` - Scope: Available in all projects - Label: Shown as "(user)" in `/help` - Use for: Personal workflows, cross-project utilities **Plugin commands** (bundled with plugins): - Location: `plugin-name/commands/` - Scope: Available when plugin installed - Label: Shown as "(plugin-name)" in `/help` - Use for: Plugin-specific functionality ## File Format ### Basic Structure Commands are Markdown files with `.md` extension: ``` .claude/commands/ ├── review.md # /review command ├── test.md # /test command └── deploy.md # /deploy command ``` **Simple command:** ```markdown Review this code for security vulnerabilities including: - SQL injection - XSS attacks - Authentication bypass - Insecure data handling ``` No frontmatter needed for basic commands. ### With YAML Frontmatter Add configuration using YAML frontmatter: ```markdown --- description: Review code for security issues allowed-tools: Read, Grep, Bash(git:*) model: sonnet --- Review this code for security vulnerabilities... 
``` ## YAML Frontmatter Fields ### description **Purpose:** Brief description shown in `/help` **Type:** String **Default:** First line of command prompt ```yaml --- description: Review pull request for code quality --- ``` **Best practice:** Clear, actionable description (under 60 characters) ### allowed-tools **Purpose:** Specify which tools command can use **Type:** String or Array **Default:** Inherits from conversation ```yaml --- allowed-tools: Read, Write, Edit, Bash(git:*) --- ``` **Patterns:** - `Read, Write, Edit` - Specific tools - `Bash(git:*)` - Bash with git commands only - `*` - All tools (rarely needed) **Use when:** Command requires specific tool access ### model **Purpose:** Specify model for command execution **Type:** String (sonnet, opus, haiku) **Default:** Inherits from conversation ```yaml --- model: haiku --- ``` **Use cases:** - `haiku` - Fast, simple commands - `sonnet` - Standard workflows - `opus` - Complex analysis ### argument-hint **Purpose:** Document expected arguments for autocomplete **Type:** String **Default:** None ```yaml --- argument-hint: [pr-number] [priority] [assignee] --- ``` **Benefits:** - Helps users understand command arguments - Improves command discovery - Documents command interface ### disable-model-invocation **Purpose:** Prevent SlashCommand tool from programmatically calling command **Type:** Boolean **Default:** false ```yaml --- disable-model-invocation: true --- ``` **Use when:** Command should only be manually invoked ## Dynamic Arguments ### Using $ARGUMENTS Capture all arguments as single string: ```markdown --- description: Fix issue by number argument-hint: [issue-number] --- Fix issue #$ARGUMENTS following our coding standards and best practices. ``` **Usage:** ``` > /fix-issue 123 > /fix-issue 456 ``` **Expands to:** ``` Fix issue #123 following our coding standards... Fix issue #456 following our coding standards... 
``` ### Using Positional Arguments Capture individual arguments with `$1`, `$2`, `$3`, etc.: ```markdown --- description: Review PR with priority and assignee argument-hint: [pr-number] [priority] [assignee] --- Review pull request #$1 with priority level $2. After review, assign to $3 for follow-up. ``` **Usage:** ``` > /review-pr 123 high alice ``` **Expands to:** ``` Review pull request #123 with priority level high. After review, assign to alice for follow-up. ``` ### Combining Arguments Mix positional and remaining arguments: ```markdown Deploy $1 to $2 environment with options: $3 ``` **Usage:** ``` > /deploy api staging --force --skip-tests ``` **Expands to:** ``` Deploy api to staging environment with options: --force --skip-tests ``` ## File References ### Using @ Syntax Include file contents in command: ```markdown --- description: Review specific file argument-hint: [file-path] --- Review @$1 for: - Code quality - Best practices - Potential bugs ``` **Usage:** ``` > /review-file src/api/users.ts ``` **Effect:** Claude reads `src/api/users.ts` before processing command ### Multiple File References Reference multiple files: ```markdown Compare @src/old-version.js with @src/new-version.js Identify: - Breaking changes - New features - Bug fixes ``` ### Static File References Reference known files without arguments: ```markdown Review @package.json and @tsconfig.json for consistency Ensure: - TypeScript version matches - Dependencies are aligned - Build configuration is correct ``` ## Bash Execution in Commands Commands can execute bash commands inline to dynamically gather context before Claude processes the command. This is useful for including repository state, environment information, or project-specific context. **When to use:** - Include dynamic context (git status, environment vars, etc.) 
- Gather project/repository state - Build context-aware workflows **Implementation details:** For complete syntax, examples, and best practices, see `references/plugin-features-reference.md` section on bash execution. The reference includes the exact syntax and multiple working examples to avoid execution issues ## Command Organization ### Flat Structure Simple organization for small command sets: ``` .claude/commands/ ├── build.md ├── test.md ├── deploy.md ├── review.md └── docs.md ``` **Use when:** 5-15 commands, no clear categories ### Namespaced Structure Organize commands in subdirectories: ``` .claude/commands/ ├── ci/ │ ├── build.md # /build (project:ci) │ ├── test.md # /test (project:ci) │ └── lint.md # /lint (project:ci) ├── git/ │ ├── commit.md # /commit (project:git) │ └── pr.md # /pr (project:git) └── docs/ ├── generate.md # /generate (project:docs) └── publish.md # /publish (project:docs) ``` **Benefits:** - Logical grouping by category - Namespace shown in `/help` - Easier to find related commands **Use when:** 15+ commands, clear categories ## Best Practices ### Command Design 1. **Single responsibility:** One command, one task 2. **Clear descriptions:** Self-explanatory in `/help` 3. **Explicit dependencies:** Use `allowed-tools` when needed 4. **Document arguments:** Always provide `argument-hint` 5. **Consistent naming:** Use verb-noun pattern (review-pr, fix-issue) ### Argument Handling 1. **Validate arguments:** Check for required arguments in prompt 2. **Provide defaults:** Suggest defaults when arguments missing 3. **Document format:** Explain expected argument format 4. **Handle edge cases:** Consider missing or invalid arguments ```markdown --- argument-hint: [pr-number] --- $IF($1, Review PR #$1, Please provide a PR number. Usage: /review-pr [number] ) ``` ### File References 1. **Explicit paths:** Use clear file paths 2. **Check existence:** Handle missing files gracefully 3. **Relative paths:** Use project-relative paths 4. 
**Glob support:** Consider using Glob tool for patterns ### Bash Commands 1. **Limit scope:** Use `Bash(git:*)` not `Bash(*)` 2. **Safe commands:** Avoid destructive operations 3. **Handle errors:** Consider command failures 4. **Keep fast:** Long-running commands slow invocation ### Documentation 1. **Add comments:** Explain complex logic 2. **Provide examples:** Show usage in comments 3. **List requirements:** Document dependencies 4. **Version commands:** Note breaking changes ```markdown --- description: Deploy application to environment argument-hint: [environment] [version] --- Deploy application to $1 environment using version $2... ``` ## Common Patterns ### Review Pattern ```markdown --- description: Review code changes allowed-tools: Read, Bash(git:*) --- Files changed: !`git diff --name-only` Review each file for: 1. Code quality and style 2. Potential bugs or issues 3. Test coverage 4. Documentation needs Provide specific feedback for each file. ``` ### Testing Pattern ```markdown --- description: Run tests for specific file argument-hint: [test-file] allowed-tools: Bash(npm:*) --- Run tests: !`npm test $1` Analyze results and suggest fixes for failures. ``` ### Documentation Pattern ```markdown --- description: Generate documentation for file argument-hint: [source-file] --- Generate comprehensive documentation for @$1 including: - Function/class descriptions - Parameter documentation - Return value descriptions - Usage examples - Edge cases and errors ``` ### Workflow Pattern ```markdown --- description: Complete PR workflow argument-hint: [pr-number] allowed-tools: Bash(gh:*), Read --- PR #$1 Workflow: 1. Fetch PR: !`gh pr view $1` 2. Review changes 3. Run checks 4. 
Approve or request changes ``` ## Troubleshooting **Command not appearing:** - Check file is in correct directory - Verify `.md` extension present - Ensure valid Markdown format - Restart Claude Code **Arguments not working:** - Verify `$1`, `$2` syntax correct - Check `argument-hint` matches usage - Ensure no extra spaces **Bash execution failing:** - Check `allowed-tools` includes Bash - Verify command syntax in backticks - Test command in terminal first - Check for required permissions **File references not working:** - Verify `@` syntax correct - Check file path is valid - Ensure Read tool allowed - Use absolute or project-relative paths ## Plugin-Specific Features ### CLAUDE_PLUGIN_ROOT Variable Plugin commands have access to `${CLAUDE_PLUGIN_ROOT}`, an environment variable that resolves to the plugin's absolute path. **Purpose:** - Reference plugin files portably - Execute plugin scripts - Load plugin configuration - Access plugin templates **Basic usage:** ```markdown --- description: Analyze using plugin script allowed-tools: Bash(node:*) --- Run analysis: !`node ${CLAUDE_PLUGIN_ROOT}/scripts/analyze.js $1` Review results and report findings. 
``` **Common patterns:** ```markdown # Execute plugin script !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/script.sh` # Load plugin configuration @${CLAUDE_PLUGIN_ROOT}/config/settings.json # Use plugin template @${CLAUDE_PLUGIN_ROOT}/templates/report.md # Access plugin resources @${CLAUDE_PLUGIN_ROOT}/docs/reference.md ``` **Why use it:** - Works across all installations - Portable between systems - No hardcoded paths needed - Essential for multi-file plugins ### Plugin Command Organization Plugin commands discovered automatically from `commands/` directory: ``` plugin-name/ ├── commands/ │ ├── foo.md # /foo (plugin:plugin-name) │ ├── bar.md # /bar (plugin:plugin-name) │ └── utils/ │ └── helper.md # /helper (plugin:plugin-name:utils) └── plugin.json ``` **Namespace benefits:** - Logical command grouping - Shown in `/help` output - Avoid name conflicts - Organize related commands **Naming conventions:** - Use descriptive action names - Avoid generic names (test, run) - Consider plugin-specific prefix - Use hyphens for multi-word names ### Plugin Command Patterns **Configuration-based pattern:** ```markdown --- description: Deploy using plugin configuration argument-hint: [environment] allowed-tools: Read, Bash(*) --- Load configuration: @${CLAUDE_PLUGIN_ROOT}/config/$1-deploy.json Deploy to $1 using configuration settings. Monitor deployment and report status. ``` **Template-based pattern:** ```markdown --- description: Generate docs from template argument-hint: [component] --- Template: @${CLAUDE_PLUGIN_ROOT}/templates/docs.md Generate documentation for $1 following template structure. ``` **Multi-script pattern:** ```markdown --- description: Complete build workflow allowed-tools: Bash(*) --- Build: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/build.sh` Test: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/test.sh` Package: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/package.sh` Review outputs and report workflow status. 
``` **See `references/plugin-features-reference.md` for detailed patterns.** ## Integration with Plugin Components Commands can integrate with other plugin components for powerful workflows. ### Agent Integration Launch plugin agents for complex tasks: ```markdown --- description: Deep code review argument-hint: [file-path] --- Initiate comprehensive review of @$1 using the code-reviewer agent. The agent will analyze: - Code structure - Security issues - Performance - Best practices Agent uses plugin resources: - ${CLAUDE_PLUGIN_ROOT}/config/rules.json - ${CLAUDE_PLUGIN_ROOT}/checklists/review.md ``` **Key points:** - Agent must exist in `plugin/agents/` directory - Claude uses Task tool to launch agent - Document agent capabilities - Reference plugin resources agent uses ### Skill Integration Leverage plugin skills for specialized knowledge: ```markdown --- description: Document API with standards argument-hint: [api-file] --- Document API in @$1 following plugin standards. Use the api-docs-standards skill to ensure: - Complete endpoint documentation - Consistent formatting - Example quality - Error documentation Generate production-ready API docs. 
``` **Key points:** - Skill must exist in `plugin/skills/` directory - Mention skill name to trigger invocation - Document skill purpose - Explain what skill provides ### Hook Coordination Design commands that work with plugin hooks: - Commands can prepare state for hooks to process - Hooks execute automatically on tool events - Commands should document expected hook behavior - Guide Claude on interpreting hook output See `references/plugin-features-reference.md` for examples of commands that coordinate with hooks ### Multi-Component Workflows Combine agents, skills, and scripts: ```markdown --- description: Comprehensive review workflow argument-hint: [file] allowed-tools: Bash(node:*), Read --- Target: @$1 Phase 1 - Static Analysis: !`node ${CLAUDE_PLUGIN_ROOT}/scripts/lint.js $1` Phase 2 - Deep Review: Launch code-reviewer agent for detailed analysis. Phase 3 - Standards Check: Use coding-standards skill for validation. Phase 4 - Report: Template: @${CLAUDE_PLUGIN_ROOT}/templates/review.md Compile findings into report following template. ``` **When to use:** - Complex multi-step workflows - Leverage multiple plugin capabilities - Require specialized analysis - Need structured outputs ## Validation Patterns Commands should validate inputs and resources before processing. 
### Argument Validation ```markdown --- description: Deploy with validation argument-hint: [environment] --- Validate environment: !`echo "$1" | grep -E "^(dev|staging|prod)$" || echo "INVALID"` If $1 is valid environment: Deploy to $1 Otherwise: Explain valid environments: dev, staging, prod Show usage: /deploy [environment] ``` ### File Existence Checks ```markdown --- description: Process configuration argument-hint: [config-file] --- Check file exists: !`test -f $1 && echo "EXISTS" || echo "MISSING"` If file exists: Process configuration: @$1 Otherwise: Explain where to place config file Show expected format Provide example configuration ``` ### Plugin Resource Validation ```markdown --- description: Run plugin analyzer allowed-tools: Bash(test:*) --- Validate plugin setup: - Script: !`test -x ${CLAUDE_PLUGIN_ROOT}/bin/analyze && echo "✓" || echo "✗"` - Config: !`test -f ${CLAUDE_PLUGIN_ROOT}/config.json && echo "✓" || echo "✗"` If all checks pass, run analysis. Otherwise, report missing components. ``` ### Error Handling ```markdown --- description: Build with error handling allowed-tools: Bash(*) --- Execute build: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/build.sh 2>&1 || echo "BUILD_FAILED"` If build succeeded: Report success and output location If build failed: Analyze error output Suggest likely causes Provide troubleshooting steps ``` **Best practices:** - Validate early in command - Provide helpful error messages - Suggest corrective actions - Handle edge cases gracefully --- For detailed frontmatter field specifications, see `references/frontmatter-reference.md`. For plugin-specific features and patterns, see `references/plugin-features-reference.md`. For command pattern examples, see `examples/` directory. 
================================================ FILE: plugins/plugin-dev/skills/command-development/examples/plugin-commands.md ================================================ # Plugin Command Examples Practical examples of commands designed for Claude Code plugins, demonstrating plugin-specific patterns and features. ## Table of Contents 1. [Simple Plugin Command](#1-simple-plugin-command) 2. [Script-Based Analysis](#2-script-based-analysis) 3. [Template-Based Generation](#3-template-based-generation) 4. [Multi-Script Workflow](#4-multi-script-workflow) 5. [Configuration-Driven Deployment](#5-configuration-driven-deployment) 6. [Agent Integration](#6-agent-integration) 7. [Skill Integration](#7-skill-integration) 8. [Multi-Component Workflow](#8-multi-component-workflow) 9. [Validated Input Command](#9-validated-input-command) 10. [Environment-Aware Command](#10-environment-aware-command) --- ## 1. Simple Plugin Command **Use case:** Basic command that uses plugin script **File:** `commands/analyze.md` ```markdown --- description: Analyze code quality using plugin tools argument-hint: [file-path] allowed-tools: Bash(node:*), Read --- Analyze @$1 using plugin's quality checker: !`node ${CLAUDE_PLUGIN_ROOT}/scripts/quality-check.js $1` Review the analysis output and provide: 1. Summary of findings 2. Priority issues to address 3. Suggested improvements 4. Code quality score interpretation ``` **Key features:** - Uses `${CLAUDE_PLUGIN_ROOT}` for portable path - Combines file reference with script execution - Simple single-purpose command --- ## 2. 
Script-Based Analysis **Use case:** Run comprehensive analysis using multiple plugin scripts **File:** `commands/full-audit.md` ```markdown --- description: Complete code audit using plugin suite argument-hint: [directory] allowed-tools: Bash(*) model: sonnet --- Running complete audit on $1: **Security scan:** !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/security-scan.sh $1` **Performance analysis:** !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/perf-analyze.sh $1` **Best practices check:** !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/best-practices.sh $1` Analyze all results and create comprehensive report including: - Critical issues requiring immediate attention - Performance optimization opportunities - Security vulnerabilities and fixes - Overall health score and recommendations ``` **Key features:** - Multiple script executions - Organized output sections - Comprehensive workflow - Clear reporting structure --- ## 3. Template-Based Generation **Use case:** Generate documentation following plugin template **File:** `commands/gen-api-docs.md` ```markdown --- description: Generate API documentation from template argument-hint: [api-file] --- Template structure: @${CLAUDE_PLUGIN_ROOT}/templates/api-documentation.md API implementation: @$1 Generate complete API documentation following the template format above. Ensure documentation includes: - Endpoint descriptions with HTTP methods - Request/response schemas - Authentication requirements - Error codes and handling - Usage examples with curl commands - Rate limiting information Format output as markdown suitable for README or docs site. ``` **Key features:** - Uses plugin template - Combines template with source file - Standardized output format - Clear documentation structure --- ## 4. 
Multi-Script Workflow **Use case:** Orchestrate build, test, and deploy workflow **File:** `commands/release.md` ```markdown --- description: Execute complete release workflow argument-hint: [version] allowed-tools: Bash(*), Read --- Executing release workflow for version $1: **Step 1 - Pre-release validation:** !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/pre-release-check.sh $1` **Step 2 - Build artifacts:** !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/build-release.sh $1` **Step 3 - Run test suite:** !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/run-tests.sh` **Step 4 - Package release:** !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/package.sh $1` Review all step outputs and report: 1. Any failures or warnings 2. Build artifacts location 3. Test results summary 4. Next steps for deployment 5. Rollback plan if needed ``` **Key features:** - Multi-step workflow - Sequential script execution - Clear step numbering - Comprehensive reporting --- ## 5. Configuration-Driven Deployment **Use case:** Deploy using environment-specific plugin configuration **File:** `commands/deploy.md` ```markdown --- description: Deploy application to environment argument-hint: [environment] allowed-tools: Read, Bash(*) --- Deployment configuration for $1: @${CLAUDE_PLUGIN_ROOT}/config/$1-deploy.json Current git state: !`git rev-parse --short HEAD` Build info: !`cat package.json | grep -E '(name|version)'` Execute deployment to $1 environment using configuration above. Deployment checklist: 1. Validate configuration settings 2. Build application for $1 3. Run pre-deployment tests 4. Deploy to target environment 5. Run smoke tests 6. Verify deployment success 7. Update deployment log Report deployment status and any issues encountered. ``` **Key features:** - Environment-specific configuration - Dynamic config file loading - Pre-deployment validation - Structured checklist --- ## 6. 
Agent Integration **Use case:** Command that launches plugin agent for complex task **File:** `commands/deep-review.md` ```markdown --- description: Deep code review using plugin agent argument-hint: [file-or-directory] --- Initiate comprehensive code review of @$1 using the code-reviewer agent. The agent will perform: 1. **Static analysis** - Check for code smells and anti-patterns 2. **Security audit** - Identify potential vulnerabilities 3. **Performance review** - Find optimization opportunities 4. **Best practices** - Ensure code follows standards 5. **Documentation check** - Verify adequate documentation The agent has access to: - Plugin's linting rules: ${CLAUDE_PLUGIN_ROOT}/config/lint-rules.json - Security checklist: ${CLAUDE_PLUGIN_ROOT}/checklists/security.md - Performance guidelines: ${CLAUDE_PLUGIN_ROOT}/docs/performance.md Note: This uses the Task tool to launch the plugin's code-reviewer agent for thorough analysis. ``` **Key features:** - Delegates to plugin agent - Documents agent capabilities - References plugin resources - Clear scope definition --- ## 7. Skill Integration **Use case:** Command that leverages plugin skill for specialized knowledge **File:** `commands/document-api.md` ```markdown --- description: Document API following plugin standards argument-hint: [api-file] --- API source code: @$1 Generate API documentation following the plugin's API documentation standards. Use the api-documentation-standards skill to ensure: - **OpenAPI compliance** - Follow OpenAPI 3.0 specification - **Consistent formatting** - Use plugin's documentation style - **Complete coverage** - Document all endpoints and schemas - **Example quality** - Provide realistic usage examples - **Error documentation** - Cover all error scenarios The skill provides: - Standard documentation templates - API documentation best practices - Common patterns for this codebase - Quality validation criteria Generate production-ready API documentation. 
``` **Key features:** - Invokes plugin skill by name - Documents skill purpose - Clear expectations - Leverages skill knowledge --- ## 8. Multi-Component Workflow **Use case:** Complex workflow using agents, skills, and scripts **File:** `commands/complete-review.md` ```markdown --- description: Comprehensive review using all plugin components argument-hint: [file-path] allowed-tools: Bash(node:*), Read --- Target file: @$1 Execute comprehensive review workflow: **Phase 1: Automated Analysis** Run plugin analyzer: !`node ${CLAUDE_PLUGIN_ROOT}/scripts/analyze.js $1` **Phase 2: Deep Review (Agent)** Launch the code-quality-reviewer agent for detailed analysis. Agent will examine: - Code structure and organization - Error handling patterns - Testing coverage - Documentation quality **Phase 3: Standards Check (Skill)** Use the coding-standards skill to validate: - Naming conventions - Code formatting - Best practices adherence - Framework-specific patterns **Phase 4: Report Generation** Template: @${CLAUDE_PLUGIN_ROOT}/templates/review-report.md Compile all findings into comprehensive report following template. **Phase 5: Recommendations** Generate prioritized action items: 1. Critical issues (must fix) 2. Important improvements (should fix) 3. Nice-to-have enhancements (could fix) Include specific file locations and suggested changes for each item. ``` **Key features:** - Multi-phase workflow - Combines scripts, agents, skills - Template-based reporting - Prioritized outputs --- ## 9. 
Validated Input Command **Use case:** Command with input validation and error handling **File:** `commands/build-env.md` ```markdown --- description: Build for specific environment with validation argument-hint: [environment] allowed-tools: Bash(*) --- Validate environment argument: !`echo "$1" | grep -E "^(dev|staging|prod)$" && echo "VALID" || echo "INVALID"` Check build script exists: !`test -x ${CLAUDE_PLUGIN_ROOT}/scripts/build.sh && echo "EXISTS" || echo "MISSING"` Verify configuration available: !`test -f ${CLAUDE_PLUGIN_ROOT}/config/$1.json && echo "FOUND" || echo "NOT_FOUND"` If all validations pass: **Configuration:** @${CLAUDE_PLUGIN_ROOT}/config/$1.json **Execute build:** !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/build.sh $1 2>&1` **Validation results:** !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/validate-build.sh $1 2>&1` Report build status and any issues. If validations fail: - Explain which validation failed - Provide expected values/locations - Suggest corrective actions - Document troubleshooting steps ``` **Key features:** - Input validation - Resource existence checks - Error handling - Helpful error messages - Graceful failure handling --- ## 10. 
Environment-Aware Command **Use case:** Command that adapts behavior based on environment **File:** `commands/run-checks.md` ```markdown --- description: Run environment-appropriate checks argument-hint: [environment] allowed-tools: Bash(*), Read --- Environment: $1 Load environment configuration: @${CLAUDE_PLUGIN_ROOT}/config/$1-checks.json Determine check level: !`echo "$1" | grep -E "^prod$" && echo "FULL" || echo "BASIC"` **For production environment:** - Full test suite: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/test-full.sh` - Security scan: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/security-scan.sh` - Performance audit: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/perf-check.sh` - Compliance check: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/compliance.sh` **For non-production environments:** - Basic tests: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/test-basic.sh` - Quick lint: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/lint.sh` Analyze results based on environment requirements: **Production:** All checks must pass with zero critical issues **Staging:** No critical issues, warnings acceptable **Development:** Focus on blocking issues only Report status and recommend proceed/block decision. ``` **Key features:** - Environment-aware logic - Conditional execution - Different validation levels - Appropriate reporting per environment --- ## Common Patterns Summary ### Pattern: Plugin Script Execution ```markdown !`node ${CLAUDE_PLUGIN_ROOT}/scripts/script-name.js $1` ``` Use for: Running plugin-provided Node.js scripts ### Pattern: Plugin Configuration Loading ```markdown @${CLAUDE_PLUGIN_ROOT}/config/config-name.json ``` Use for: Loading plugin configuration files ### Pattern: Plugin Template Usage ```markdown @${CLAUDE_PLUGIN_ROOT}/templates/template-name.md ``` Use for: Using plugin templates for generation ### Pattern: Agent Invocation ```markdown Launch the [agent-name] agent for [task description]. 
``` Use for: Delegating complex tasks to plugin agents ### Pattern: Skill Reference ```markdown Use the [skill-name] skill to ensure [requirements]. ``` Use for: Leveraging plugin skills for specialized knowledge ### Pattern: Input Validation ```markdown Validate input: !`echo "$1" | grep -E "^pattern$" && echo "OK" || echo "ERROR"` ``` Use for: Validating command arguments ### Pattern: Resource Validation ```markdown Check exists: !`test -f ${CLAUDE_PLUGIN_ROOT}/path/file && echo "YES" || echo "NO"` ``` Use for: Verifying required plugin files exist --- ## Development Tips ### Testing Plugin Commands 1. **Test with plugin installed:** ```bash cd /path/to/plugin claude /command-name args ``` 2. **Verify ${CLAUDE_PLUGIN_ROOT} expansion:** ```bash # Add debug output to command !`echo "Plugin root: ${CLAUDE_PLUGIN_ROOT}"` ``` 3. **Test across different working directories:** ```bash cd /tmp && claude /command-name cd /other/project && claude /command-name ``` 4. **Validate resource availability:** ```bash # Check all plugin resources exist !`ls -la ${CLAUDE_PLUGIN_ROOT}/scripts/` !`ls -la ${CLAUDE_PLUGIN_ROOT}/config/` ``` ### Common Mistakes to Avoid 1. **Using relative paths instead of ${CLAUDE_PLUGIN_ROOT}:** ```markdown # Wrong !`node ./scripts/analyze.js` # Correct !`node ${CLAUDE_PLUGIN_ROOT}/scripts/analyze.js` ``` 2. **Forgetting to allow required tools:** ```markdown # Missing allowed-tools !`bash script.sh` # Will fail without Bash permission # Correct --- allowed-tools: Bash(*) --- !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/script.sh` ``` 3. **Not validating inputs:** ```markdown # Risky - no validation Deploy to $1 environment # Better - with validation Validate: !`echo "$1" | grep -E "^(dev|staging|prod)$" || echo "INVALID"` Deploy to $1 environment (if valid) ``` 4. 
**Hardcoding plugin paths:** ```markdown # Wrong - breaks on different installations @/home/user/.claude/plugins/my-plugin/config.json # Correct - works everywhere @${CLAUDE_PLUGIN_ROOT}/config.json ``` --- For detailed plugin-specific features, see `references/plugin-features-reference.md`. For general command development, see the main `SKILL.md`. ================================================ FILE: plugins/plugin-dev/skills/command-development/examples/simple-commands.md ================================================ # Simple Command Examples Basic slash command patterns for common use cases. **Important:** All examples below are written as instructions FOR Claude (agent consumption), not messages TO users. Commands tell Claude what to do; they do not tell users what will happen. ## Example 1: Code Review Command **File:** `.claude/commands/review.md` ```markdown --- description: Review code for quality and issues allowed-tools: Read, Bash(git:*) --- Review the code in this repository for: 1. **Code Quality:** - Readability and maintainability - Consistent style and formatting - Appropriate abstraction levels 2. **Potential Issues:** - Logic errors or bugs - Edge cases not handled - Performance concerns 3. **Best Practices:** - Design patterns used correctly - Error handling present - Documentation adequate Provide specific feedback with file and line references. 
``` **Usage:** ``` > /review ``` --- ## Example 2: Security Review Command **File:** `.claude/commands/security-review.md` ```markdown --- description: Review code for security vulnerabilities allowed-tools: Read, Grep model: sonnet --- Perform comprehensive security review checking for: **Common Vulnerabilities:** - SQL injection risks - Cross-site scripting (XSS) - Authentication/authorization issues - Insecure data handling - Hardcoded secrets or credentials **Security Best Practices:** - Input validation present - Output encoding correct - Secure defaults used - Error messages safe - Logging appropriate (no sensitive data) For each issue found: - File and line number - Severity (Critical/High/Medium/Low) - Description of vulnerability - Recommended fix Prioritize issues by severity. ``` **Usage:** ``` > /security-review ``` --- ## Example 3: Test Command with File Argument **File:** `.claude/commands/test-file.md` ```markdown --- description: Run tests for specific file argument-hint: [test-file] allowed-tools: Bash(npm:*), Bash(jest:*) --- Run tests for $1: Test execution: !`npm test $1` Analyze results: - Tests passed/failed - Code coverage - Performance issues - Flaky tests If failures found, suggest fixes based on error messages. ``` **Usage:** ``` > /test-file src/utils/helpers.test.ts ``` --- ## Example 4: Documentation Generator **File:** `.claude/commands/document.md` ```markdown --- description: Generate documentation for file argument-hint: [source-file] --- Generate comprehensive documentation for @$1 Include: **Overview:** - Purpose and responsibility - Main functionality - Dependencies **API Documentation:** - Function/method signatures - Parameter descriptions with types - Return values with types - Exceptions/errors thrown **Usage Examples:** - Basic usage - Common patterns - Edge cases **Implementation Notes:** - Algorithm complexity - Performance considerations - Known limitations Format as Markdown suitable for project documentation. 
``` **Usage:** ``` > /document src/api/users.ts ``` --- ## Example 5: Git Status Summary **File:** `.claude/commands/git-status.md` ```markdown --- description: Summarize Git repository status allowed-tools: Bash(git:*) --- Repository Status Summary: **Current Branch:** !`git branch --show-current` **Status:** !`git status --short` **Recent Commits:** !`git log --oneline -5` **Remote Status:** !`git fetch && git status -sb` Provide: - Summary of changes - Suggested next actions - Any warnings or issues ``` **Usage:** ``` > /git-status ``` --- ## Example 6: Deployment Command **File:** `.claude/commands/deploy.md` ```markdown --- description: Deploy to specified environment argument-hint: [environment] [version] allowed-tools: Bash(kubectl:*), Read --- Deploy to $1 environment using version $2 **Pre-deployment Checks:** 1. Verify $1 configuration exists 2. Check version $2 is valid 3. Verify cluster accessibility: !`kubectl cluster-info` **Deployment Steps:** 1. Update deployment manifest with version $2 2. Apply configuration to $1 3. Monitor rollout status 4. Verify pod health 5. Run smoke tests **Rollback Plan:** Document current version for rollback if issues occur. Proceed with deployment? (yes/no) ``` **Usage:** ``` > /deploy staging v1.2.3 ``` --- ## Example 7: Comparison Command **File:** `.claude/commands/compare-files.md` ```markdown --- description: Compare two files argument-hint: [file1] [file2] --- Compare @$1 with @$2 **Analysis:** 1. **Differences:** - Lines added - Lines removed - Lines modified 2. **Functional Changes:** - Breaking changes - New features - Bug fixes - Refactoring 3. **Impact:** - Affected components - Required updates elsewhere - Migration requirements 4. **Recommendations:** - Code review focus areas - Testing requirements - Documentation updates needed Present as structured comparison report. 
``` **Usage:** ``` > /compare-files src/old-api.ts src/new-api.ts ``` --- ## Example 8: Quick Fix Command **File:** `.claude/commands/quick-fix.md` ```markdown --- description: Quick fix for common issues argument-hint: [issue-description] model: haiku --- Quickly fix: $ARGUMENTS **Approach:** 1. Identify the issue 2. Find relevant code 3. Propose fix 4. Explain solution Focus on: - Simple, direct solution - Minimal changes - Following existing patterns - No breaking changes Provide code changes with file paths and line numbers. ``` **Usage:** ``` > /quick-fix button not responding to clicks > /quick-fix typo in error message ``` --- ## Example 9: Research Command **File:** `.claude/commands/research.md` ```markdown --- description: Research best practices for topic argument-hint: [topic] model: sonnet --- Research best practices for: $ARGUMENTS **Coverage:** 1. **Current State:** - How we currently handle this - Existing implementations 2. **Industry Standards:** - Common patterns - Recommended approaches - Tools and libraries 3. **Comparison:** - Our approach vs standards - Gaps or improvements needed - Migration considerations 4. **Recommendations:** - Concrete action items - Priority and effort estimates - Resources for implementation Provide actionable guidance based on research. ``` **Usage:** ``` > /research error handling in async operations > /research API authentication patterns ``` --- ## Example 10: Explain Code Command **File:** `.claude/commands/explain.md` ```markdown --- description: Explain how code works argument-hint: [file-or-function] --- Explain @$1 in detail **Explanation Structure:** 1. **Overview:** - What it does - Why it exists - How it fits in system 2. **Step-by-Step:** - Line-by-line walkthrough - Key algorithms or logic - Important details 3. **Inputs and Outputs:** - Parameters and types - Return values - Side effects 4. **Edge Cases:** - Error handling - Special cases - Limitations 5. 
**Usage Examples:** - How to call it - Common patterns - Integration points Explain at level appropriate for junior engineer. ``` **Usage:** ``` > /explain src/utils/cache.ts > /explain AuthService.login ``` --- ## Key Patterns ### Pattern 1: Read-Only Analysis ```markdown --- allowed-tools: Read, Grep --- Analyze but don't modify... ``` **Use for:** Code review, documentation, analysis ### Pattern 2: Git Operations ```markdown --- allowed-tools: Bash(git:*) --- !`git status` Analyze and suggest... ``` **Use for:** Repository status, commit analysis ### Pattern 3: Single Argument ```markdown --- argument-hint: [target] --- Process $1... ``` **Use for:** File operations, targeted actions ### Pattern 4: Multiple Arguments ```markdown --- argument-hint: [source] [target] [options] --- Process $1 to $2 with $3... ``` **Use for:** Workflows, deployments, comparisons ### Pattern 5: Fast Execution ```markdown --- model: haiku --- Quick simple task... ``` **Use for:** Simple, repetitive commands ### Pattern 6: File Comparison ```markdown Compare @$1 with @$2... ``` **Use for:** Diff analysis, migration planning ### Pattern 7: Context Gathering ```markdown --- allowed-tools: Bash(git:*), Read --- Context: !`git status` Files: @file1 @file2 Analyze... ``` **Use for:** Informed decision making ## Tips for Writing Simple Commands 1. **Start basic:** Single responsibility, clear purpose 2. **Add complexity gradually:** Start without frontmatter 3. **Test incrementally:** Verify each feature works 4. **Use descriptive names:** Command name should indicate purpose 5. **Document arguments:** Always use argument-hint 6. **Provide examples:** Show usage in comments 7. 
**Handle errors:** Consider missing arguments or files ================================================ FILE: plugins/plugin-dev/skills/command-development/references/advanced-workflows.md ================================================ # Advanced Workflow Patterns Multi-step command sequences and composition patterns for complex workflows. ## Overview Advanced workflows combine multiple commands, coordinate state across invocations, and create sophisticated automation sequences. These patterns enable building complex functionality from simple command building blocks. ## Multi-Step Command Patterns ### Sequential Workflow Command Commands that guide users through multi-step processes: ```markdown --- description: Complete PR review workflow argument-hint: [pr-number] allowed-tools: Bash(gh:*), Read, Grep --- # PR Review Workflow for #$1 ## Step 1: Fetch PR Details !`gh pr view $1 --json title,body,author,files` ## Step 2: Review Files Files changed: !`gh pr diff $1 --name-only` For each file: - Check code quality - Verify tests exist - Review documentation ## Step 3: Run Checks Test status: !`gh pr checks $1` Verify: - All tests passing - No merge conflicts - CI/CD successful ## Step 4: Provide Feedback Summarize: - Issues found (critical/minor) - Suggestions for improvement - Approval recommendation Would you like to: 1. Approve PR 2. Request changes 3. Leave comments only Reply with your choice and I'll help complete the action. ``` **Key features:** - Numbered steps for clarity - Bash execution for context - Decision points for user input - Next action suggestions ### State-Carrying Workflow Commands that maintain state between invocations: ```markdown --- description: Initialize deployment workflow allowed-tools: Write, Bash(git:*) --- # Initialize Deployment Creating deployment tracking file... 
Current branch: !`git branch --show-current` Latest commit: !`git log -1 --format=%H` Deployment state saved to `.claude/deployment-state.local.md`: \`\`\`markdown --- initialized: true branch: $(git branch --show-current) commit: $(git log -1 --format=%H) timestamp: $(date -u +%Y-%m-%dT%H:%M:%SZ) status: initialized --- # Deployment Tracking Branch: $(git branch --show-current) Started: $(date) Next steps: 1. Run tests: /deploy-test 2. Build: /deploy-build 3. Deploy: /deploy-execute \`\`\` State saved. Run `/deploy-test` to continue. ``` **Next command** (`/deploy-test`): ```markdown --- description: Run deployment tests allowed-tools: Read, Bash(npm:*) --- Reading deployment state from `.claude/deployment-state.local.md`... Running tests: !`npm test` Updating state to 'tested'... Tests complete. Run `/deploy-build` to continue. ``` **Pattern benefits:** - Persistent state across commands - Clear workflow progression - Safety checkpoints - Resume capability ### Conditional Workflow Branching Commands that adapt based on conditions: ```markdown --- description: Smart deployment workflow argument-hint: [environment] allowed-tools: Bash(git:*), Bash(npm:*), Read --- # Deploy to $1 ## Pre-flight Checks Branch: !`git branch --show-current` Status: !`git status --short` **Checking conditions:** 1. Branch status: - If main/master: Require approval - If feature branch: Warning about target - If hotfix: Fast-track process 2. Tests: !`npm test` - If tests fail: STOP - fix tests first - If tests pass: Continue 3. Environment: - If $1 = 'production': Extra validation - If $1 = 'staging': Standard process - If $1 = 'dev': Minimal checks **Workflow decision:** Based on above, proceeding with: [determined workflow] [Conditional steps based on environment and status] Ready to deploy? 
(yes/no) ``` ## Command Composition Patterns ### Command Chaining Commands designed to work together: ```markdown --- description: Prepare for code review --- # Prepare Code Review Running preparation sequence: 1. Format code: /format-code 2. Run linter: /lint-code 3. Run tests: /test-all 4. Generate coverage: /coverage-report 5. Create review summary: /review-summary This is a meta-command. After completing each step above, I'll compile results and prepare comprehensive review materials. Starting sequence... ``` **Individual commands** are simple: - `/format-code` - Just formats - `/lint-code` - Just lints - `/test-all` - Just tests **Composition command** orchestrates them. ### Pipeline Pattern Commands that process output from previous commands: ```markdown --- description: Analyze test failures --- # Analyze Test Failures ## Step 1: Get test results (Run /test-all first if not done) Reading test output... ## Step 2: Categorize failures - Flaky tests (random failures) - Consistent failures - New failures vs existing ## Step 3: Prioritize Rank by: - Impact (critical path vs edge case) - Frequency (always fails vs sometimes) - Effort (quick fix vs major work) ## Step 4: Generate fix plan For each failure: - Root cause hypothesis - Suggested fix approach - Estimated effort Would you like me to: 1. Fix highest priority failure 2. Generate detailed fix plans for all 3. Create GitHub issues for each ``` ### Parallel Execution Pattern Commands that coordinate multiple simultaneous operations: ```markdown --- description: Run comprehensive validation allowed-tools: Bash(*), Read --- # Comprehensive Validation Running validations in parallel... Starting: - Code quality checks - Security scanning - Dependency audit - Performance profiling This will take 2-3 minutes. I'll monitor all processes and report when complete. [Poll each process and report progress] All validations complete. 
Summary: - Quality: PASS (0 issues) - Security: WARN (2 minor issues) - Dependencies: PASS - Performance: PASS (baseline met) Details: [Collated results from all checks] ``` ## Workflow State Management ### Using .local.md Files Store workflow state in plugin-specific files: ```markdown .claude/plugin-name-workflow.local.md: --- workflow: deployment stage: testing started: 2025-01-15T10:30:00Z environment: staging branch: feature/new-api commit: abc123def tests_passed: false build_complete: false --- # Deployment Workflow State Current stage: Testing Started: 2025-01-15 10:30 UTC Completed steps: - ✅ Validation - ✅ Branch check - ⏳ Testing (in progress) Pending steps: - Build - Deploy - Smoke tests ``` **Reading state in commands:** ```markdown --- description: Continue deployment workflow allowed-tools: Read, Write --- Reading workflow state from .claude/plugin-name-workflow.local.md... Current stage: @.claude/plugin-name-workflow.local.md [Parse YAML frontmatter to determine next step] Next action based on state: [determined action] ``` ### Workflow Recovery Handle interrupted workflows: ```markdown --- description: Resume deployment workflow allowed-tools: Read --- # Resume Deployment Checking for interrupted workflow... State file: @.claude/plugin-name-workflow.local.md **Workflow found:** - Started: [timestamp] - Environment: [env] - Last completed: [step] **Recovery options:** 1. Resume from last step 2. Restart from beginning 3. Abort and clean up Which would you like? (1/2/3) ``` ## Workflow Coordination Patterns ### Cross-Command Communication Commands that signal each other: ```markdown --- description: Mark feature complete allowed-tools: Write --- # Mark Feature Complete Writing completion marker... 
Creating: .claude/feature-complete.flag This signals other commands that feature is ready for: - Integration testing (/integration-test will auto-detect) - Documentation generation (/docs-generate will include) - Release notes (/release-notes will add) Feature marked complete. ``` **Other commands check for flag:** ```markdown --- description: Generate release notes allowed-tools: Read, Bash(git:*) --- Checking for completed features... if [ -f .claude/feature-complete.flag ]; then Feature ready for release notes fi [Include in release notes] ``` ### Workflow Locking Prevent concurrent workflow execution: ```markdown --- description: Start deployment allowed-tools: Read, Write, Bash --- # Start Deployment Checking for active deployments... if [ -f .claude/deployment.lock ]; then ERROR: Deployment already in progress Started: [timestamp from lock file] Cannot start concurrent deployment. Wait for completion or run /deployment-abort Exit. fi Creating deployment lock... Deployment started. Lock created. [Proceed with deployment] ``` **Lock cleanup:** ```markdown --- description: Complete deployment allowed-tools: Write, Bash --- Deployment complete. Removing deployment lock... rm .claude/deployment.lock Ready for next deployment. ``` ## Advanced Argument Handling ### Optional Arguments with Defaults ```markdown --- description: Deploy with optional version argument-hint: [environment] [version] --- Environment: ${1:-staging} Version: ${2:-latest} Deploying ${2:-latest} to ${1:-staging}... Note: Using defaults for missing arguments: - Environment defaults to 'staging' - Version defaults to 'latest' ``` ### Argument Validation ```markdown --- description: Deploy to validated environment argument-hint: [environment] --- Environment: $1 Validating environment... valid_envs="dev staging production" if ! echo "$valid_envs" | grep -w "$1" > /dev/null; then ERROR: Invalid environment '$1' Valid options: dev, staging, production Exit. fi Environment validated. Proceeding... 
``` ### Argument Transformation ```markdown --- description: Deploy with shorthand argument-hint: [env-shorthand] --- Input: $1 Expanding shorthand: - d/dev → development - s/stg → staging - p/prod → production case "$1" in d|dev) ENV="development";; s|stg) ENV="staging";; p|prod) ENV="production";; *) ENV="$1";; esac Deploying to: $ENV ``` ## Error Handling in Workflows ### Graceful Failure ```markdown --- description: Resilient deployment workflow --- # Deployment Workflow Running steps with error handling... ## Step 1: Tests !`npm test` if [ $? -ne 0 ]; then ERROR: Tests failed Options: 1. Fix tests and retry 2. Skip tests (NOT recommended) 3. Abort deployment What would you like to do? [Wait for user input before continuing] fi ## Step 2: Build [Continue only if Step 1 succeeded] ``` ### Rollback on Failure ```markdown --- description: Deployment with rollback --- # Deploy with Rollback Saving current state for rollback... Previous version: !`current-version.sh` Deploying new version... !`deploy.sh` if [ $? -ne 0 ]; then DEPLOYMENT FAILED Initiating automatic rollback... !`rollback.sh` Rolled back to previous version. Check logs for failure details. fi Deployment complete. ``` ### Checkpoint Recovery ```markdown --- description: Workflow with checkpoints --- # Multi-Stage Deployment ## Checkpoint 1: Validation !`validate.sh` echo "checkpoint:validation" >> .claude/deployment-checkpoints.log ## Checkpoint 2: Build !`build.sh` echo "checkpoint:build" >> .claude/deployment-checkpoints.log ## Checkpoint 3: Deploy !`deploy.sh` echo "checkpoint:deploy" >> .claude/deployment-checkpoints.log If any step fails, resume with: /deployment-resume [last-successful-checkpoint] ``` ## Best Practices ### Workflow Design 1. **Clear progression**: Number steps, show current position 2. **Explicit state**: Don't rely on implicit state 3. **User control**: Provide decision points 4. **Error recovery**: Handle failures gracefully 5. 
**Progress indication**: Show what's done, what's pending ### Command Composition 1. **Single responsibility**: Each command does one thing well 2. **Composable design**: Commands work together easily 3. **Standard interfaces**: Consistent input/output formats 4. **Loose coupling**: Commands don't depend on each other's internals ### State Management 1. **Persistent state**: Use .local.md files 2. **Atomic updates**: Write complete state files atomically 3. **State validation**: Check state file format/completeness 4. **Cleanup**: Remove stale state files 5. **Documentation**: Document state file formats ### Error Handling 1. **Fail fast**: Detect errors early 2. **Clear messages**: Explain what went wrong 3. **Recovery options**: Provide clear next steps 4. **State preservation**: Keep state for recovery 5. **Rollback capability**: Support undoing changes ## Example: Complete Deployment Workflow ### Initialize Command ```markdown --- description: Initialize deployment argument-hint: [environment] allowed-tools: Write, Bash(git:*) --- # Initialize Deployment to $1 Creating workflow state... \`\`\`yaml --- workflow: deployment environment: $1 branch: !`git branch --show-current` commit: !`git rev-parse HEAD` stage: initialized timestamp: !`date -u +%Y-%m-%dT%H:%M:%SZ` --- \`\`\` Written to .claude/deployment-state.local.md Next: Run /deployment-validate ``` ### Validation Command ```markdown --- description: Validate deployment allowed-tools: Read, Bash --- Reading state: @.claude/deployment-state.local.md Running validation... - Branch check: PASS - Tests: PASS - Build: PASS Updating state to 'validated'... Next: Run /deployment-execute ``` ### Execution Command ```markdown --- description: Execute deployment allowed-tools: Read, Bash, Write --- Reading state: @.claude/deployment-state.local.md Executing deployment to [environment]... !`deploy.sh [environment]` Deployment complete. Updating state to 'completed'... 
Cleanup: /deployment-cleanup ``` ### Cleanup Command ```markdown --- description: Clean up deployment allowed-tools: Bash --- Removing deployment state... rm .claude/deployment-state.local.md Deployment workflow complete. ``` This complete workflow demonstrates state management, sequential execution, error handling, and clean separation of concerns across multiple commands. ================================================ FILE: plugins/plugin-dev/skills/command-development/references/documentation-patterns.md ================================================ # Command Documentation Patterns Strategies for creating self-documenting, maintainable commands with excellent user experience. ## Overview Well-documented commands are easier to use, maintain, and distribute. Documentation should be embedded in the command itself, making it immediately accessible to users and maintainers. ## Self-Documenting Command Structure ### Complete Command Template ```markdown --- description: Clear, actionable description under 60 chars argument-hint: [arg1] [arg2] [optional-arg] allowed-tools: Read, Bash(git:*) model: sonnet --- # Command Implementation [Command prompt content here...] [Explain what will happen...] [Guide user through steps...] [Provide clear output...] 
``` ### Documentation Comment Sections **PURPOSE**: Why the command exists - Problem it solves - Use cases - When to use vs when not to use **USAGE**: Basic syntax - Command invocation pattern - Required vs optional arguments - Default values **ARGUMENTS**: Detailed argument documentation - Each argument described - Type information - Valid values/ranges - Defaults **EXAMPLES**: Concrete usage examples - Common use cases - Edge cases - Expected outputs **REQUIREMENTS**: Prerequisites - Dependencies - Permissions - Environmental setup **RELATED COMMANDS**: Connections - Similar commands - Complementary commands - Alternative approaches **TROUBLESHOOTING**: Common issues - Known problems - Solutions - Workarounds **CHANGELOG**: Version history - What changed when - Breaking changes highlighted - Migration guidance ## In-Line Documentation Patterns ### Commented Sections ```markdown --- description: Complex multi-step command --- Checking prerequisites... - Git repository: !`git rev-parse --git-dir 2>/dev/null` - Branch exists: [validation logic] Analyzing differences between $1 and $2... [Analysis logic...] Based on analysis, recommend: [Recommendations...] ``` ### Inline Explanations ```markdown --- description: Deployment command with inline docs --- # Deploy to $1 ## Pre-flight Checks Current branch: !`git branch --show-current` if [ "$1" = "production" ] && [ "$(git branch --show-current)" != "main" ]; then ⚠️ WARNING: Not on main branch for production deploy This is unusual. Confirm this is intentional. fi Running tests: !`npm test` ✓ All checks passed ## Deployment Deploying to $1 environment... [Deployment steps...] Verifying deployment health... [Health checks...] Deployment complete! ## Next Steps 1. Monitor logs: /logs $1 2. Run smoke tests: /smoke-test $1 3. 
Notify team: /notify-deployment $1 ``` ### Decision Point Documentation ```markdown --- description: Interactive deployment command --- # Interactive Deployment ## Configuration Review Target: $1 Current version: !`cat version.txt` New version: $2 Review the above configuration. **Continue with deployment?** - Reply "yes" to proceed - Reply "no" to cancel - Reply "edit" to modify configuration [Await user input before continuing...] Proceeding with deployment... ``` ## Help Text Patterns ### Built-in Help Command Create a help subcommand for complex commands: ```markdown --- description: Main command with help argument-hint: [subcommand] [args] --- # Command Processor if [ "$1" = "help" ] || [ "$1" = "--help" ] || [ "$1" = "-h" ]; then **Command Help** USAGE: /command [subcommand] [args] SUBCOMMANDS: init [name] Initialize new configuration deploy [env] Deploy to environment status Show current status rollback Rollback last deployment help Show this help EXAMPLES: /command init my-project /command deploy staging /command status /command rollback For detailed help on a subcommand: /command [subcommand] --help Exit. fi [Regular command processing...] ``` ### Contextual Help Provide help based on context: ```markdown --- description: Context-aware command argument-hint: [operation] [target] --- # Context-Aware Operation if [ -z "$1" ]; then **No operation specified** Available operations: - analyze: Analyze target for issues - fix: Apply automatic fixes - report: Generate detailed report Usage: /command [operation] [target] Examples: /command analyze src/ /command fix src/app.js /command report Run /command help for more details. Exit. fi [Command continues if operation provided...] ``` ## Error Message Documentation ### Helpful Error Messages ```markdown --- description: Command with good error messages --- # Validation Command if [ -z "$1" ]; then ❌ ERROR: Missing required argument The 'file-path' argument is required. 
USAGE: /validate [file-path] EXAMPLE: /validate src/app.js Try again with a file path. Exit. fi if [ ! -f "$1" ]; then ❌ ERROR: File not found: $1 The specified file does not exist or is not accessible. COMMON CAUSES: 1. Typo in file path 2. File was deleted or moved 3. Insufficient permissions SUGGESTIONS: - Check spelling: $1 - Verify file exists: ls -la $(dirname "$1") - Check permissions: ls -l "$1" Exit. fi [Command continues if validation passes...] ``` ### Error Recovery Guidance ```markdown --- description: Command with recovery guidance --- # Operation Command Running operation... !`risky-operation.sh` if [ $? -ne 0 ]; then ❌ OPERATION FAILED The operation encountered an error and could not complete. WHAT HAPPENED: The risky-operation.sh script returned a non-zero exit code. WHAT THIS MEANS: - Changes may be partially applied - System may be in inconsistent state - Manual intervention may be needed RECOVERY STEPS: 1. Check operation logs: cat /tmp/operation.log 2. Verify system state: /check-state 3. If needed, rollback: /rollback-operation 4. Fix underlying issue 5. Retry operation: /retry-operation NEED HELP? - Check troubleshooting guide: /help troubleshooting - Contact support with error code: ERR_OP_FAILED_001 Exit. fi ``` ## Usage Example Documentation ### Embedded Examples ```markdown --- description: Command with embedded examples --- # Feature Command This command performs feature analysis with multiple options. ## Basic Usage \`\`\` /feature analyze src/ \`\`\` Analyzes all files in src/ directory for feature usage. ## Advanced Usage \`\`\` /feature analyze src/ --detailed \`\`\` Provides detailed analysis including: - Feature breakdown by file - Usage patterns - Optimization suggestions ## Use Cases **Use Case 1: Quick overview** \`\`\` /feature analyze . \`\`\` Get high-level feature summary of entire project. **Use Case 2: Specific directory** \`\`\` /feature analyze src/components \`\`\` Focus analysis on components directory only. 
**Use Case 3: Comparison** \`\`\` /feature analyze src/ --compare baseline.json \`\`\` Compare current features against baseline. --- Now processing your request... [Command implementation...] ``` ### Example-Driven Documentation ```markdown --- description: Example-heavy command --- # Transformation Command ## What This Does Transforms data from one format to another. ## Examples First ### Example 1: JSON to YAML **Input:** `data.json` \`\`\`json {"name": "test", "value": 42} \`\`\` **Command:** `/transform data.json yaml` **Output:** `data.yaml` \`\`\`yaml name: test value: 42 \`\`\` ### Example 2: CSV to JSON **Input:** `data.csv` \`\`\`csv name,value test,42 \`\`\` **Command:** `/transform data.csv json` **Output:** `data.json` \`\`\`json [{"name": "test", "value": "42"}] \`\`\` ### Example 3: With Options **Command:** `/transform data.json yaml --pretty --sort-keys` **Result:** Formatted YAML with sorted keys --- ## Your Transformation File: $1 Format: $2 [Perform transformation...] ``` ## Maintenance Documentation ### Version and Changelog Track versions and changes in a comment block at the top of the command file: ```markdown <!-- VERSION: 1.2.0 CHANGELOG: - 1.2.0: Added --compare option - 1.1.0: Added detailed analysis mode - 1.0.0: Initial release --> ``` ### Maintenance Notes Record implementation caveats for future maintainers in a comment block: ```markdown <!-- MAINTENANCE NOTES: - Requires jq for JSON parsing - Update embedded examples whenever output format changes - Keep argument-hint in sync with positional arguments --> ``` ## README Documentation Commands should have companion README files: ```markdown # Command Name Brief description of what the command does. ## Installation This command is part of the [plugin-name] plugin. Install with: \`\`\` /plugin install plugin-name \`\`\` ## Usage Basic usage: \`\`\` /command-name [arg1] [arg2] \`\`\` ## Arguments - `arg1`: Description (required) - `arg2`: Description (optional, defaults to X) ## Examples ### Example 1: Basic Usage \`\`\` /command-name value1 value2 \`\`\` Description of what happens. ### Example 2: Advanced Usage \`\`\` /command-name value1 --option \`\`\` Description of advanced feature.
## Configuration Optional configuration file: `.claude/command-name.local.md` \`\`\`markdown --- default_arg: value enable_feature: true --- \`\`\` ## Requirements - Git 2.x or later - jq (for JSON processing) - Node.js 14+ (optional, for advanced features) ## Troubleshooting ### Issue: Command not found **Solution:** Ensure plugin is installed and enabled. ### Issue: Permission denied **Solution:** Check file permissions and allowed-tools setting. ## Contributing Contributions welcome! See [CONTRIBUTING.md](CONTRIBUTING.md). ## License MIT License - See [LICENSE](LICENSE). ## Support - Issues: https://github.com/user/plugin/issues - Docs: https://docs.example.com - Email: support@example.com ``` ## Best Practices ### Documentation Principles 1. **Write for your future self**: Assume you'll forget details 2. **Examples before explanations**: Show, then tell 3. **Progressive disclosure**: Basic info first, details available 4. **Keep it current**: Update docs when code changes 5. **Test your docs**: Verify examples actually work ### Documentation Locations 1. **In command file**: Core usage, examples, inline explanations 2. **README**: Installation, configuration, troubleshooting 3. **Separate docs**: Detailed guides, tutorials, API reference 4. **Comments**: Implementation details for maintainers ### Documentation Style 1. **Clear and concise**: No unnecessary words 2. **Active voice**: "Run the command" not "The command can be run" 3. **Consistent terminology**: Use same terms throughout 4. **Formatted well**: Use headings, lists, code blocks 5. **Accessible**: Assume reader is beginner ### Documentation Maintenance 1. **Version everything**: Track what changed when 2. **Deprecate gracefully**: Warn before removing features 3. **Migration guides**: Help users upgrade 4. **Archive old docs**: Keep old versions accessible 5. 
**Review regularly**: Ensure docs match reality ## Documentation Checklist Before releasing a command: - [ ] Description in frontmatter is clear - [ ] argument-hint documents all arguments - [ ] Usage examples in comments - [ ] Common use cases shown - [ ] Error messages are helpful - [ ] Requirements documented - [ ] Related commands listed - [ ] Changelog maintained - [ ] Version number updated - [ ] README created/updated - [ ] Examples actually work - [ ] Troubleshooting section complete With good documentation, commands become self-service, reducing support burden and improving user experience. ================================================ FILE: plugins/plugin-dev/skills/command-development/references/frontmatter-reference.md ================================================ # Command Frontmatter Reference Complete reference for YAML frontmatter fields in slash commands. ## Frontmatter Overview YAML frontmatter is optional metadata at the start of command files: ```markdown --- description: Brief description allowed-tools: Read, Write model: sonnet argument-hint: [arg1] [arg2] --- Command prompt content here... ``` All fields are optional. Commands work without any frontmatter. 
## Field Specifications ### description **Type:** String **Required:** No **Default:** First line of command prompt **Max Length:** ~60 characters recommended for `/help` display **Purpose:** Describes what the command does, shown in `/help` output **Examples:** ```yaml description: Review code for security issues ``` ```yaml description: Deploy to staging environment ``` ```yaml description: Generate API documentation ``` **Best practices:** - Keep under 60 characters for clean display - Start with verb (Review, Deploy, Generate) - Be specific about what command does - Avoid redundant "command" or "slash command" **Good:** - ✅ "Review PR for code quality and security" - ✅ "Deploy application to specified environment" - ✅ "Generate comprehensive API documentation" **Bad:** - ❌ "This command reviews PRs" (unnecessary "This command") - ❌ "Review" (too vague) - ❌ "A command that reviews pull requests for code quality, security issues, and best practices" (too long) ### allowed-tools **Type:** String or Array of strings **Required:** No **Default:** Inherits from conversation permissions **Purpose:** Restrict or specify which tools command can use **Formats:** **Single tool:** ```yaml allowed-tools: Read ``` **Multiple tools (comma-separated):** ```yaml allowed-tools: Read, Write, Edit ``` **Multiple tools (array):** ```yaml allowed-tools: - Read - Write - Bash(git:*) ``` **Tool Patterns:** **Specific tools:** ```yaml allowed-tools: Read, Grep, Edit ``` **Bash with command filter:** ```yaml allowed-tools: Bash(git:*) # Only git commands allowed-tools: Bash(npm:*) # Only npm commands allowed-tools: Bash(docker:*) # Only docker commands ``` **All tools (not recommended):** ```yaml allowed-tools: "*" ``` **When to use:** 1. **Security:** Restrict command to safe operations ```yaml allowed-tools: Read, Grep # Read-only command ``` 2. **Clarity:** Document required tools ```yaml allowed-tools: Bash(git:*), Read ``` 3. 
**Bash execution:** Enable bash command output ```yaml allowed-tools: Bash(git status:*), Bash(git diff:*) ``` **Best practices:** - Be as restrictive as possible - Use command filters for Bash (e.g., `git:*` not `*`) - Only specify when different from conversation permissions - Document why specific tools are needed ### model **Type:** String **Required:** No **Default:** Inherits from conversation **Values:** `sonnet`, `opus`, `haiku` **Purpose:** Specify which Claude model executes the command **Examples:** ```yaml model: haiku # Fast, efficient for simple tasks ``` ```yaml model: sonnet # Balanced performance (default) ``` ```yaml model: opus # Maximum capability for complex tasks ``` **When to use:** **Use `haiku` for:** - Simple, formulaic commands - Fast execution needed - Low complexity tasks - Frequent invocations ```yaml --- description: Format code file model: haiku --- ``` **Use `sonnet` for:** - Standard commands (default) - Balanced speed/quality - Most common use cases ```yaml --- description: Review code changes model: sonnet --- ``` **Use `opus` for:** - Complex analysis - Architectural decisions - Deep code understanding - Critical tasks ```yaml --- description: Analyze system architecture model: opus --- ``` **Best practices:** - Omit unless specific need - Use `haiku` for speed when possible - Reserve `opus` for genuinely complex tasks - Test with different models to find right balance ### argument-hint **Type:** String **Required:** No **Default:** None **Purpose:** Document expected arguments for users and autocomplete **Format:** ```yaml argument-hint: [arg1] [arg2] [optional-arg] ``` **Examples:** **Single argument:** ```yaml argument-hint: [pr-number] ``` **Multiple required arguments:** ```yaml argument-hint: [environment] [version] ``` **Optional arguments:** ```yaml argument-hint: [file-path] [options] ``` **Descriptive names:** ```yaml argument-hint: [source-branch] [target-branch] [commit-message] ``` **Best practices:** - Use square 
brackets `[]` for each argument - Use descriptive names (not `arg1`, `arg2`) - Indicate optional vs required in description - Match order to positional arguments in command - Keep concise but clear **Examples by pattern:** **Simple command:** ```yaml --- description: Fix issue by number argument-hint: [issue-number] --- Fix issue #$1... ``` **Multi-argument:** ```yaml --- description: Deploy to environment argument-hint: [app-name] [environment] [version] --- Deploy $1 to $2 using version $3... ``` **With options:** ```yaml --- description: Run tests with options argument-hint: [test-pattern] [options] --- Run tests matching $1 with options: $2 ``` ### disable-model-invocation **Type:** Boolean **Required:** No **Default:** false **Purpose:** Prevent SlashCommand tool from programmatically invoking command **Examples:** ```yaml disable-model-invocation: true ``` **When to use:** 1. **Manual-only commands:** Commands requiring user judgment ```yaml --- description: Approve deployment to production disable-model-invocation: true --- ``` 2. **Destructive operations:** Commands with irreversible effects ```yaml --- description: Delete all test data disable-model-invocation: true --- ``` 3. **Interactive workflows:** Commands needing user input ```yaml --- description: Walk through setup wizard disable-model-invocation: true --- ``` **Default behavior (false):** - Command available to SlashCommand tool - Claude can invoke programmatically - Still available for manual invocation **When true:** - Command only invokable by user typing `/command` - Not available to SlashCommand tool - Safer for sensitive operations **Best practices:** - Use sparingly (limits Claude's autonomy) - Document why in command comments - Consider if command should exist if always manual ## Complete Examples ### Minimal Command No frontmatter needed: ```markdown Review this code for common issues and suggest improvements. 
``` ### Simple Command Just description: ```markdown --- description: Review code for issues --- Review this code for common issues and suggest improvements. ``` ### Standard Command Description and tools: ```markdown --- description: Review Git changes allowed-tools: Bash(git:*), Read --- Current changes: !`git diff --name-only` Review each changed file for: - Code quality - Potential bugs - Best practices ``` ### Complex Command All common fields: ```markdown --- description: Deploy application to environment argument-hint: [app-name] [environment] [version] allowed-tools: Bash(kubectl:*), Bash(helm:*), Read model: sonnet --- Deploy $1 to $2 environment using version $3 Pre-deployment checks: - Verify $2 configuration - Check cluster status: !`kubectl cluster-info` - Validate version $3 exists Proceed with deployment following deployment runbook. ``` ### Manual-Only Command Restricted invocation: ```markdown --- description: Approve production deployment argument-hint: [deployment-id] disable-model-invocation: true allowed-tools: Bash(gh:*) --- Review deployment $1 for production approval: Deployment details: !`gh api /deployments/$1` Verify: - All tests passed - Security scan clean - Stakeholder approval - Rollback plan ready Type "APPROVED" to confirm deployment. 
``` ## Validation ### Common Errors **Invalid YAML syntax:** ```yaml --- description: "Missing quote allowed-tools: Read, Write model: sonnet --- # ❌ Missing closing quote above ``` **Fix:** Validate YAML syntax **Incorrect tool specification:** ```yaml allowed-tools: Bash # ❌ Missing command filter ``` **Fix:** Use `Bash(git:*)` format **Invalid model name:** ```yaml model: gpt4 # ❌ Not a valid Claude model ``` **Fix:** Use `sonnet`, `opus`, or `haiku` ### Validation Checklist Before committing command: - [ ] YAML syntax valid (no errors) - [ ] Description under 60 characters - [ ] allowed-tools uses proper format - [ ] model is valid value if specified - [ ] argument-hint matches positional arguments - [ ] disable-model-invocation used appropriately ## Best Practices Summary 1. **Start minimal:** Add frontmatter only when needed 2. **Document arguments:** Always use argument-hint with arguments 3. **Restrict tools:** Use most restrictive allowed-tools that works 4. **Choose right model:** Use haiku for speed, opus for complexity 5. **Manual-only sparingly:** Only use disable-model-invocation when necessary 6. **Clear descriptions:** Make commands discoverable in `/help` 7. **Test thoroughly:** Verify frontmatter works as expected ================================================ FILE: plugins/plugin-dev/skills/command-development/references/interactive-commands.md ================================================ # Interactive Command Patterns Comprehensive guide to creating commands that gather user feedback and make decisions through the AskUserQuestion tool. ## Overview Some commands need user input that doesn't work well with simple arguments.
For example: - Choosing between multiple complex options with trade-offs - Selecting multiple items from a list - Making decisions that require explanation - Gathering preferences or configuration interactively For these cases, use the **AskUserQuestion tool** within command execution rather than relying on command arguments. ## When to Use AskUserQuestion ### Use AskUserQuestion When: 1. **Multiple choice decisions** with explanations needed 2. **Complex options** that require context to choose 3. **Multi-select scenarios** (choosing multiple items) 4. **Preference gathering** for configuration 5. **Interactive workflows** that adapt based on answers ### Use Command Arguments When: 1. **Simple values** (file paths, numbers, names) 2. **Known inputs** user already has 3. **Scriptable workflows** that should be automatable 4. **Fast invocations** where prompting would slow down ## AskUserQuestion Basics ### Tool Parameters ```typescript { questions: [ { question: "Which authentication method should we use?", header: "Auth method", // Short label (max 12 chars) multiSelect: false, // true for multiple selection options: [ { label: "OAuth 2.0", description: "Industry standard, supports multiple providers" }, { label: "JWT", description: "Stateless, good for APIs" }, { label: "Session", description: "Traditional, server-side state" } ] } ] } ``` **Key points:** - Users can always choose "Other" to provide custom input (automatic) - `multiSelect: true` allows selecting multiple options - Options should be 2-4 choices (not more) - Can ask 1-4 questions per tool call ## Command Pattern for User Interaction ### Basic Interactive Command ```markdown --- description: Interactive setup command allowed-tools: AskUserQuestion, Write --- # Interactive Plugin Setup This command will guide you through configuring the plugin with a series of questions. 
## Step 1: Gather Configuration Use the AskUserQuestion tool to ask: **Question 1 - Deployment target:** - header: "Deploy to" - question: "Which deployment platform will you use?" - options: - AWS (Amazon Web Services with ECS/EKS) - GCP (Google Cloud with GKE) - Azure (Microsoft Azure with AKS) - Local (Docker on local machine) **Question 2 - Environment strategy:** - header: "Environments" - question: "How many environments do you need?" - options: - Single (Just production) - Standard (Dev, Staging, Production) - Complete (Dev, QA, Staging, Production) **Question 3 - Features to enable:** - header: "Features" - question: "Which features do you want to enable?" - multiSelect: true - options: - Auto-scaling (Automatic resource scaling) - Monitoring (Health checks and metrics) - CI/CD (Automated deployment pipeline) - Backups (Automated database backups) ## Step 2: Process Answers Based on the answers received from AskUserQuestion: 1. Parse the deployment target choice 2. Set up environment-specific configuration 3. Enable selected features 4. Generate configuration files ## Step 3: Generate Configuration Create `.claude/plugin-name.local.md` with: \`\`\`yaml --- deployment_target: [answer from Q1] environments: [answer from Q2] features: auto_scaling: [true if selected in Q3] monitoring: [true if selected in Q3] ci_cd: [true if selected in Q3] backups: [true if selected in Q3] --- # Plugin Configuration Generated: [timestamp] Target: [deployment_target] Environments: [environments] \`\`\` ## Step 4: Confirm and Next Steps Confirm configuration created and guide user on next steps. ``` ### Multi-Stage Interactive Workflow ```markdown --- description: Multi-stage interactive workflow allowed-tools: AskUserQuestion, Read, Write, Bash --- # Multi-Stage Deployment Setup This command walks through deployment setup in stages, adapting based on your answers. ## Stage 1: Basic Configuration Use AskUserQuestion to ask about deployment basics. 
Based on answers, determine which additional questions to ask. ## Stage 2: Advanced Options (Conditional) If user selected "Advanced" deployment in Stage 1: Use AskUserQuestion to ask about: - Load balancing strategy - Caching configuration - Security hardening options If user selected "Simple" deployment: - Skip advanced questions - Use sensible defaults ## Stage 3: Confirmation Show summary of all selections. Use AskUserQuestion for final confirmation: - header: "Confirm" - question: "Does this configuration look correct?" - options: - Yes (Proceed with setup) - No (Start over) - Modify (Let me adjust specific settings) If "Modify", ask which specific setting to change. ## Stage 4: Execute Setup Based on confirmed configuration, execute setup steps. ``` ## Interactive Question Design ### Question Structure **Good questions:** ```markdown Question: "Which database should we use for this project?" Header: "Database" Options: - PostgreSQL (Relational, ACID compliant, best for complex queries) - MongoDB (Document store, flexible schema, best for rapid iteration) - Redis (In-memory, fast, best for caching and sessions) ``` **Poor questions:** ```markdown Question: "Database?" // Too vague Header: "DB" // Unclear abbreviation Options: - Option 1 // Not descriptive - Option 2 ``` ### Option Design Best Practices **Clear labels:** - Use 1-5 words - Specific and descriptive - No jargon without context **Helpful descriptions:** - Explain what the option means - Mention key benefits or trade-offs - Help user make informed decision - Keep to 1-2 sentences **Appropriate number:** - 2-4 options per question - Don't overwhelm with too many choices - Group related options - "Other" automatically provided ### Multi-Select Questions **When to use multiSelect:** ```markdown Use AskUserQuestion for enabling features: Question: "Which features do you want to enable?" 
Header: "Features" multiSelect: true // Allow selecting multiple Options: - Logging (Detailed operation logs) - Metrics (Performance monitoring) - Alerts (Error notifications) - Backups (Automatic backups) ``` User can select any combination: none, some, or all. **When NOT to use multiSelect:** ```markdown Question: "Which authentication method?" multiSelect: false // Only one auth method makes sense ``` Mutually exclusive choices should not use multiSelect. ## Command Patterns with AskUserQuestion ### Pattern 1: Simple Yes/No Decision ```markdown --- description: Command with confirmation allowed-tools: AskUserQuestion, Bash --- # Destructive Operation This operation will delete all cached data. Use AskUserQuestion to confirm: Question: "This will delete all cached data. Are you sure?" Header: "Confirm" Options: - Yes (Proceed with deletion) - No (Cancel operation) If user selects "Yes": Execute deletion Report completion If user selects "No": Cancel operation Exit without changes ``` ### Pattern 2: Multiple Configuration Questions ```markdown --- description: Multi-question configuration allowed-tools: AskUserQuestion, Write --- # Project Configuration Setup Gather configuration through multiple questions. Use AskUserQuestion with multiple questions in one call: **Question 1:** - question: "Which programming language?" - header: "Language" - options: Python, TypeScript, Go, Rust **Question 2:** - question: "Which test framework?" - header: "Testing" - options: Jest, PyTest, Go Test, Cargo Test (Adapt based on language from Q1) **Question 3:** - question: "Which CI/CD platform?" - header: "CI/CD" - options: GitHub Actions, GitLab CI, CircleCI **Question 4:** - question: "Which features do you need?" - header: "Features" - multiSelect: true - options: Linting, Type checking, Code coverage, Security scanning Process all answers together to generate cohesive configuration. 
``` ### Pattern 3: Conditional Question Flow ```markdown --- description: Conditional interactive workflow allowed-tools: AskUserQuestion, Read, Write --- # Adaptive Configuration ## Question 1: Deployment Complexity Use AskUserQuestion: Question: "How complex is your deployment?" Header: "Complexity" Options: - Simple (Single server, straightforward) - Standard (Multiple servers, load balancing) - Complex (Microservices, orchestration) ## Conditional Questions Based on Answer If answer is "Simple": - No additional questions - Use minimal configuration If answer is "Standard": - Ask about load balancing strategy - Ask about scaling policy If answer is "Complex": - Ask about orchestration platform (Kubernetes, Docker Swarm) - Ask about service mesh (Istio, Linkerd, None) - Ask about monitoring (Prometheus, Datadog, CloudWatch) - Ask about logging aggregation ## Process Conditional Answers Generate configuration appropriate for selected complexity level. ``` ### Pattern 4: Iterative Collection ```markdown --- description: Collect multiple items iteratively allowed-tools: AskUserQuestion, Write --- # Collect Team Members We'll collect team member information for the project. ## Question: How many team members? Use AskUserQuestion: Question: "How many team members should we set up?" Header: "Team size" Options: - 2 people - 3 people - 4 people - 6 people ## Iterate Through Team Members For each team member (1 to N based on answer): Use AskUserQuestion for member details: Question: "What role for team member [number]?" Header: "Role" Options: - Frontend Developer - Backend Developer - DevOps Engineer - QA Engineer - Designer Store each member's information. ## Generate Team Configuration After collecting all N members, create team configuration file with all members and their roles. 
``` ### Pattern 5: Dependency Selection ```markdown --- description: Select dependencies with multi-select allowed-tools: AskUserQuestion --- # Configure Project Dependencies ## Question: Required Libraries Use AskUserQuestion with multiSelect: Question: "Which libraries does your project need?" Header: "Dependencies" multiSelect: true Options: - React (UI framework) - Express (Web server) - TypeORM (Database ORM) - Jest (Testing framework) - Axios (HTTP client) User can select any combination. ## Process Selections For each selected library: - Add to package.json dependencies - Generate sample configuration - Create usage examples - Update documentation ``` ## Best Practices for Interactive Commands ### Question Design 1. **Clear and specific**: Question should be unambiguous 2. **Concise header**: Max 12 characters for clean display 3. **Helpful options**: Labels are clear, descriptions explain trade-offs 4. **Appropriate count**: 2-4 options per question, 1-4 questions per call 5. **Logical order**: Questions flow naturally ### Error Handling ```markdown # Handle AskUserQuestion Responses After calling AskUserQuestion, verify answers received: If answers are empty or invalid: Something went wrong gathering responses. Please try again or provide configuration manually: [Show alternative approach] Exit. If answers look correct: Process as expected ``` ### Progressive Disclosure ```markdown # Start Simple, Get Detailed as Needed ## Question 1: Setup Type Use AskUserQuestion: Question: "How would you like to set up?" Header: "Setup type" Options: - Quick (Use recommended defaults) - Custom (Configure all options) - Guided (Step-by-step with explanations) If "Quick": Apply defaults, minimal questions If "Custom": Ask all available configuration questions If "Guided": Ask questions with extra explanation Provide recommendations along the way ``` ### Multi-Select Guidelines **Good multi-select use:** ```markdown Question: "Which features do you want to enable?" 
multiSelect: true Options: - Logging - Metrics - Alerts - Backups Reason: User might want any combination ``` **Bad multi-select use:** ```markdown Question: "Which database engine?" multiSelect: true // ❌ Should be single-select Reason: Can only use one database engine ``` ## Advanced Patterns ### Validation Loop ```markdown --- description: Interactive with validation allowed-tools: AskUserQuestion, Bash --- # Setup with Validation ## Gather Configuration Use AskUserQuestion to collect settings. ## Validate Configuration Check if configuration is valid: - Required dependencies available? - Settings compatible with each other? - No conflicts detected? If validation fails: Show validation errors Use AskUserQuestion to ask: Question: "Configuration has issues. What would you like to do?" Header: "Next step" Options: - Fix (Adjust settings to resolve issues) - Override (Proceed despite warnings) - Cancel (Abort setup) Based on answer, retry or proceed or exit. ``` ### Build Configuration Incrementally ```markdown --- description: Incremental configuration builder allowed-tools: AskUserQuestion, Write, Read --- # Incremental Setup ## Phase 1: Core Settings Use AskUserQuestion for core settings. Save to `.claude/config-partial.yml` ## Phase 2: Review Core Settings Show user the core settings: Based on these core settings, you need to configure: - [Setting A] (because you chose [X]) - [Setting B] (because you chose [Y]) Ready to continue? ## Phase 3: Detailed Settings Use AskUserQuestion for settings based on Phase 1 answers. Merge with core settings. ## Phase 4: Final Review Present complete configuration. Use AskUserQuestion for confirmation: Question: "Is this configuration correct?" 
Options: - Yes (Save and apply) - No (Start over) - Modify (Edit specific settings) ``` ### Dynamic Options Based on Context ```markdown --- description: Context-aware questions allowed-tools: AskUserQuestion, Bash, Read --- # Context-Aware Setup ## Detect Current State Check existing configuration: - Current language: !`detect-language.sh` - Existing frameworks: !`detect-frameworks.sh` - Available tools: !`check-tools.sh` ## Ask Context-Appropriate Questions Based on detected language, ask relevant questions. If language is TypeScript: Use AskUserQuestion: Question: "Which TypeScript features should we enable?" Options: - Strict Mode (Maximum type safety) - Decorators (Experimental decorator support) - Path Mapping (Module path aliases) If language is Python: Use AskUserQuestion: Question: "Which Python tools should we configure?" Options: - Type Hints (mypy for type checking) - Black (Code formatting) - Pylint (Linting and style) Questions adapt to project context. ``` ## Real-World Example: Multi-Agent Swarm Launch **From multi-agent-swarm plugin:** ```markdown --- description: Launch multi-agent swarm allowed-tools: AskUserQuestion, Read, Write, Bash --- # Launch Multi-Agent Swarm ## Interactive Mode (No Task List Provided) If user didn't provide task list file, help create one interactively. ### Question 1: Agent Count Use AskUserQuestion: Question: "How many agents should we launch?" Header: "Agent count" Options: - 2 agents (Best for simple projects) - 3 agents (Good for medium projects) - 4 agents (Standard team size) - 6 agents (Large projects) - 8 agents (Complex multi-component projects) ### Question 2: Task Definition Approach Use AskUserQuestion: Question: "How would you like to define tasks?" 
Header: "Task setup" Options: - File (I have a task list file ready) - Guided (Help me create tasks interactively) - Custom (Other approach) If "File": Ask for file path Validate file exists and has correct format If "Guided": Enter iterative task creation mode (see below) ### Question 3: Coordination Mode Use AskUserQuestion: Question: "How should agents coordinate?" Header: "Coordination" Options: - Team Leader (One agent coordinates others) - Collaborative (Agents coordinate as peers) - Autonomous (Independent work, minimal coordination) ### Iterative Task Creation (If "Guided" Selected) For each agent (1 to N from Question 1): **Question A: Agent Name** Question: "What should we call agent [number]?" Header: "Agent name" Options: - auth-agent - api-agent - ui-agent - db-agent (Provide relevant suggestions based on common patterns) **Question B: Task Type** Question: "What task for [agent-name]?" Header: "Task type" Options: - Authentication (User auth, JWT, OAuth) - API Endpoints (REST/GraphQL APIs) - UI Components (Frontend components) - Database (Schema, migrations, queries) - Testing (Test suites and coverage) - Documentation (Docs, README, guides) **Question C: Dependencies** Question: "What does [agent-name] depend on?" Header: "Dependencies" multiSelect: true Options: - [List of previously defined agents] - No dependencies **Question D: Base Branch** Question: "Which base branch for PR?" Header: "PR base" Options: - main - staging - develop Store all task information for each agent. ### Generate Task List File After collecting all agent task details: 1. Ask for project name 2. Generate task list in proper format 3. Save to `.daisy/swarm/tasks.md` 4. Show user the file path 5. Proceed with launch using generated task list ``` ## Best Practices ### Question Writing 1. **Be specific**: "Which database?" not "Choose option?" 2. **Explain trade-offs**: Describe pros/cons in option descriptions 3. **Provide context**: Question text should stand alone 4. 
**Guide decisions**: Help user make informed choice 5. **Keep concise**: Header max 12 chars, descriptions 1-2 sentences ### Option Design 1. **Meaningful labels**: Specific, clear names 2. **Informative descriptions**: Explain what each option does 3. **Show trade-offs**: Help user understand implications 4. **Consistent detail**: All options equally explained 5. **2-4 options**: Not too few, not too many ### Flow Design 1. **Logical order**: Questions flow naturally 2. **Build on previous**: Later questions use earlier answers 3. **Minimize questions**: Ask only what's needed 4. **Group related**: Ask related questions together 5. **Show progress**: Indicate where in flow ### User Experience 1. **Set expectations**: Tell user what to expect 2. **Explain why**: Help user understand purpose 3. **Provide defaults**: Suggest recommended options 4. **Allow escape**: Let user cancel or restart 5. **Confirm actions**: Summarize before executing ## Common Patterns ### Pattern: Feature Selection ```markdown Use AskUserQuestion: Question: "Which features do you need?" Header: "Features" multiSelect: true Options: - Authentication - Authorization - Rate Limiting - Caching ``` ### Pattern: Environment Configuration ```markdown Use AskUserQuestion: Question: "Which environment is this?" Header: "Environment" Options: - Development (Local development) - Staging (Pre-production testing) - Production (Live environment) ``` ### Pattern: Priority Selection ```markdown Use AskUserQuestion: Question: "What's the priority for this task?" Header: "Priority" Options: - Critical (Must be done immediately) - High (Important, do soon) - Medium (Standard priority) - Low (Nice to have) ``` ### Pattern: Scope Selection ```markdown Use AskUserQuestion: Question: "What scope should we analyze?" 
Header: "Scope" Options: - Current file (Just this file) - Current directory (All files in directory) - Entire project (Full codebase scan) ``` ## Combining Arguments and Questions ### Use Both Appropriately **Arguments for known values:** ```markdown --- argument-hint: [project-name] allowed-tools: AskUserQuestion, Write --- Setup for project: $1 Now gather additional configuration... Use AskUserQuestion for options that require explanation. ``` **Questions for complex choices:** ```markdown Project name from argument: $1 Now use AskUserQuestion to choose: - Architecture pattern - Technology stack - Deployment strategy These require explanation, so questions work better than arguments. ``` ## Troubleshooting **Questions not appearing:** - Verify AskUserQuestion in allowed-tools - Check question format is correct - Ensure options array has 2-4 items **User can't make selection:** - Check option labels are clear - Verify descriptions are helpful - Consider if too many options - Ensure multiSelect setting is correct **Flow feels confusing:** - Reduce number of questions - Group related questions - Add explanation between stages - Show progress through workflow With AskUserQuestion, commands become interactive wizards that guide users through complex decisions while maintaining the clarity that simple arguments provide for straightforward inputs. ================================================ FILE: plugins/plugin-dev/skills/command-development/references/marketplace-considerations.md ================================================ # Marketplace Considerations for Commands Guidelines for creating commands designed for distribution and marketplace success. ## Overview Commands distributed through marketplaces need additional consideration beyond personal use commands. They must work across environments, handle diverse use cases, and provide excellent user experience for unknown users. 
## Design for Distribution ### Universal Compatibility **Cross-platform considerations:** ```markdown --- description: Cross-platform command allowed-tools: Bash(*) --- # Platform-Aware Command Detecting platform... case "$(uname)" in Darwin*) PLATFORM="macOS" ;; Linux*) PLATFORM="Linux" ;; MINGW*|MSYS*|CYGWIN*) PLATFORM="Windows" ;; *) PLATFORM="Unknown" ;; esac Platform: $PLATFORM if [ "$PLATFORM" = "Windows" ]; then # Windows-specific handling PATH_SEP="\\" NULL_DEVICE="NUL" else # Unix-like handling PATH_SEP="/" NULL_DEVICE="/dev/null" fi [Platform-appropriate implementation...] ``` **Avoid platform-specific commands:** ```markdown !`pbcopy < file.txt` if command -v pbcopy > /dev/null; then pbcopy < file.txt elif command -v xclip > /dev/null; then xclip -selection clipboard < file.txt elif command -v clip.exe > /dev/null; then cat file.txt | clip.exe else echo "Clipboard not available on this platform" fi ``` ### Minimal Dependencies **Check for required tools:** ```markdown --- description: Dependency-aware command allowed-tools: Bash(*) --- # Check Dependencies Required tools: - git - jq - node Checking availability... MISSING_DEPS="" for tool in git jq node; do if ! command -v $tool > /dev/null; then MISSING_DEPS="$MISSING_DEPS $tool" fi done if [ -n "$MISSING_DEPS" ]; then ❌ ERROR: Missing required dependencies:$MISSING_DEPS INSTALLATION: - git: https://git-scm.com/downloads - jq: https://stedolan.github.io/jq/download/ - node: https://nodejs.org/ Install missing tools and try again. Exit. fi ✓ All dependencies available [Continue with command...] ``` **Document optional dependencies:** ```markdown ``` ### Graceful Degradation **Handle missing features:** ```markdown --- description: Feature-aware command --- # Feature Detection Detecting available features... 
FEATURES="" if command -v gh > /dev/null; then FEATURES="$FEATURES github" fi if command -v docker > /dev/null; then FEATURES="$FEATURES docker" fi Available features: $FEATURES if echo "$FEATURES" | grep -q "github"; then # Full functionality with GitHub integration echo "✓ GitHub integration available" else # Reduced functionality without GitHub echo "⚠ Limited functionality: GitHub CLI not installed" echo " Install 'gh' for full features" fi [Adapt behavior based on available features...] ``` ## User Experience for Unknown Users ### Clear Onboarding **First-run experience:** ```markdown --- description: Command with onboarding allowed-tools: Read, Write --- # First Run Check if [ ! -f ".claude/command-initialized" ]; then **Welcome to Command Name!** This appears to be your first time using this command. WHAT THIS COMMAND DOES: [Brief explanation of purpose and benefits] QUICK START: 1. Basic usage: /command [arg] 2. For help: /command help 3. Examples: /command examples SETUP: No additional setup required. You're ready to go! ✓ Initialization complete [Create initialization marker] Ready to proceed with your request... fi [Normal command execution...] ``` **Progressive feature discovery:** ```markdown --- description: Command with tips --- # Command Execution [Main functionality...] --- 💡 TIP: Did you know? You can speed up this command with the --fast flag: /command --fast [args] For more tips: /command tips ``` ### Comprehensive Error Handling **Anticipate user mistakes:** ```markdown --- description: Forgiving command --- # User Input Handling Argument: "$1" if [ "$1" = "hlep" ] || [ "$1" = "hepl" ]; then Did you mean: help? Showing help instead... [Display help] Exit. fi if [ "$1" != "valid-option1" ] && [ "$1" != "valid-option2" ]; then ❌ Unknown option: $1 Did you mean: - valid-option1 (most similar) - valid-option2 For all options: /command help Exit. fi [Command continues...] 
``` **Helpful diagnostics:** ```markdown --- description: Diagnostic command --- # Operation Failed The operation could not complete. **Diagnostic Information:** Environment: - Platform: $(uname) - Shell: $SHELL - Working directory: $(pwd) - Command: /command $@ Checking common issues: - Git repository: $(git rev-parse --git-dir 2>&1) - Write permissions: $(test -w . && echo "OK" || echo "DENIED") - Required files: $(test -f config.yml && echo "Found" || echo "Missing") This information helps debug the issue. For support, include the above diagnostics. ``` ## Distribution Best Practices ### Namespace Awareness **Avoid name collisions:** ```markdown --- description: Namespaced command --- # Plugin Name Command [Implementation...] ``` **Document naming rationale:** ```markdown ``` ### Configurability **User preferences:** ```markdown --- description: Configurable command allowed-tools: Read --- # Load User Configuration Default configuration: - verbose: false - color: true - max_results: 10 Checking for user config: .claude/plugin-name.local.md if [ -f ".claude/plugin-name.local.md" ]; then # Parse YAML frontmatter for settings VERBOSE=$(grep "^verbose:" .claude/plugin-name.local.md | cut -d: -f2 | tr -d ' ') COLOR=$(grep "^color:" .claude/plugin-name.local.md | cut -d: -f2 | tr -d ' ') MAX_RESULTS=$(grep "^max_results:" .claude/plugin-name.local.md | cut -d: -f2 | tr -d ' ') echo "✓ Using user configuration" else echo "Using default configuration" echo "Create .claude/plugin-name.local.md to customize" fi [Use configuration in command...] ``` **Sensible defaults:** ```markdown --- description: Command with smart defaults --- # Smart Defaults Configuration: - Format: ${FORMAT:-json} # Defaults to json - Output: ${OUTPUT:-stdout} # Defaults to stdout - Verbose: ${VERBOSE:-false} # Defaults to false These defaults work for 80% of use cases. 
Override with arguments: /command --format yaml --output file.txt --verbose Or set in .claude/plugin-name.local.md: \`\`\`yaml --- format: yaml output: custom.txt verbose: true --- \`\`\` ``` ### Version Compatibility **Version checking:** ```markdown --- description: Version-aware command --- # Version Check Command version: 2.1.0 Plugin version: [detect from plugin.json] if [[ "$PLUGIN_VERSION" < "2.0.0" ]]; then ❌ ERROR: Incompatible plugin version This command requires plugin version >= 2.0.0 Current version: $PLUGIN_VERSION Update plugin: /plugin update plugin-name Exit. fi ✓ Version compatible [Command continues...] ``` **Deprecation warnings:** ```markdown --- description: Command with deprecation warnings --- # Deprecation Check if [ "$1" = "--old-flag" ]; then ⚠️ DEPRECATION WARNING The --old-flag option is deprecated as of v2.0.0 It will be removed in v3.0.0 (est. June 2025) Use instead: --new-flag Example: Old: /command --old-flag value New: /command --new-flag value See migration guide: /command migrate Continuing with deprecated behavior for now... fi [Handle both old and new flags during deprecation period...] ``` ## Marketplace Presentation ### Command Discovery **Descriptive naming:** ```markdown --- description: Review pull request with security and quality checks --- ``` ```markdown --- description: Do the thing --- ``` **Searchable keywords:** ```markdown ``` ### Showcase Examples **Compelling demonstrations:** ```markdown --- description: Advanced code analysis command --- # Code Analysis Command This command performs deep code analysis with actionable insights. 
## Demo: Quick Security Audit Try it now: \`\`\` /analyze-code src/ --security \`\`\` **What you'll get:** - Security vulnerability detection - Code quality metrics - Performance bottleneck identification - Actionable recommendations **Sample output:** \`\`\` Security Analysis Results ========================= 🔴 Critical (2): - SQL injection risk in users.js:45 - XSS vulnerability in display.js:23 🟡 Warnings (5): - Unvalidated input in api.js:67 ... Recommendations: 1. Fix critical issues immediately 2. Review warnings before next release 3. Run /analyze-code --fix for auto-fixes \`\`\` --- Ready to analyze your code... [Command implementation...] ``` ### User Reviews and Feedback **Feedback mechanism:** ```markdown --- description: Command with feedback --- # Command Complete [Command results...] --- **How was your experience?** This helps improve the command for everyone. Rate this command: - 👍 Helpful - 👎 Not helpful - 🐛 Found a bug - 💡 Have a suggestion Reply with an emoji or: - /command feedback Your feedback matters! ``` **Usage analytics preparation:** ```markdown ``` ## Quality Standards ### Professional Polish **Consistent branding:** ```markdown --- description: Branded command --- # ✨ Command Name Part of the [Plugin Name] suite [Command functionality...] --- **Need Help?** - Documentation: https://docs.example.com - Support: support@example.com - Community: https://community.example.com Powered by Plugin Name v2.1.0 ``` **Attention to detail:** ```markdown ✓ Use proper emoji/symbols consistently ✓ Align output columns neatly ✓ Format numbers with thousands separators ✓ Use color/formatting appropriately ✓ Provide progress indicators ✓ Show estimated time remaining ✓ Confirm successful operations ``` ### Reliability **Idempotency:** ```markdown --- description: Idempotent command --- # Safe Repeated Execution Checking if operation already completed... 
if [ -f ".claude/operation-completed.flag" ]; then ℹ️ Operation already completed Completed at: $(cat .claude/operation-completed.flag) To re-run: 1. Remove flag: rm .claude/operation-completed.flag 2. Run command again Otherwise, no action needed. Exit. fi Performing operation... [Safe, repeatable operation...] Marking complete... echo "$(date)" > .claude/operation-completed.flag ``` **Atomic operations:** ```markdown --- description: Atomic command --- # Atomic Operation This operation is atomic - either fully succeeds or fully fails. Creating temporary workspace... TEMP_DIR=$(mktemp -d) Performing changes in isolated environment... [Make changes in $TEMP_DIR] if [ $? -eq 0 ]; then ✓ Changes validated Applying changes atomically... mv $TEMP_DIR/* ./target/ ✓ Operation complete else ❌ Changes failed validation Rolling back... rm -rf $TEMP_DIR No changes applied. Safe to retry. fi ``` ## Testing for Distribution ### Pre-Release Checklist ```markdown ``` ### Beta Testing **Beta release approach:** ```markdown --- description: Beta command (v0.9.0) --- # 🧪 Beta Command **This is a beta release** Features may change based on feedback. BETA STATUS: - Version: 0.9.0 - Stability: Experimental - Support: Limited - Feedback: Encouraged Known limitations: - Performance not optimized - Some edge cases not handled - Documentation incomplete Help improve this command: - Report issues: /command report-issue - Suggest features: /command suggest - Join beta testers: /command join-beta --- [Command implementation...] --- **Thank you for beta testing!** Your feedback helps make this command better. 
``` ## Maintenance and Updates ### Update Strategy **Versioned commands:** ```markdown ``` **Update notifications:** ```markdown --- description: Update-aware command --- # Check for Updates Current version: 2.1.0 Latest version: [check if available] if [ "$CURRENT_VERSION" != "$LATEST_VERSION" ]; then 📢 UPDATE AVAILABLE New version: $LATEST_VERSION Current: $CURRENT_VERSION What's new: - Feature improvements - Bug fixes - Performance enhancements Update with: /plugin update plugin-name Release notes: https://releases.example.com/v$LATEST_VERSION fi [Command continues...] ``` ## Best Practices Summary ### Distribution Design 1. **Universal**: Works across platforms and environments 2. **Self-contained**: Minimal dependencies, clear requirements 3. **Graceful**: Degrades gracefully when features unavailable 4. **Forgiving**: Anticipates and handles user mistakes 5. **Helpful**: Clear errors, good defaults, excellent docs ### Marketplace Success 1. **Discoverable**: Clear name, good description, searchable keywords 2. **Professional**: Polished presentation, consistent branding 3. **Reliable**: Tested thoroughly, handles edge cases 4. **Maintainable**: Versioned, updated regularly, supported 5. **User-focused**: Great UX, responsive to feedback ### Quality Standards 1. **Complete**: Fully documented, all features working 2. **Tested**: Works in real environments, edge cases handled 3. **Secure**: No vulnerabilities, safe operations 4. **Performant**: Reasonable speed, resource-efficient 5. **Ethical**: Privacy-respecting, user consent With these considerations, commands become marketplace-ready and delight users across diverse environments and use cases. 
================================================ FILE: plugins/plugin-dev/skills/command-development/references/plugin-features-reference.md ================================================ # Plugin-Specific Command Features Reference This reference covers features and patterns specific to commands bundled in Claude Code plugins. ## Table of Contents - [Plugin Command Discovery](#plugin-command-discovery) - [CLAUDE_PLUGIN_ROOT Environment Variable](#claude_plugin_root-environment-variable) - [Plugin Command Patterns](#plugin-command-patterns) - [Integration with Plugin Components](#integration-with-plugin-components) - [Validation Patterns](#validation-patterns) ## Plugin Command Discovery ### Auto-Discovery Claude Code automatically discovers commands in plugins using the following locations: ``` plugin-name/ ├── commands/ # Auto-discovered commands │ ├── foo.md # /foo (plugin:plugin-name) │ └── bar.md # /bar (plugin:plugin-name) └── plugin.json # Plugin manifest ``` **Key points:** - Commands are discovered at plugin load time - No manual registration required - Commands appear in `/help` with "(plugin:plugin-name)" label - Subdirectories create namespaces ### Namespaced Plugin Commands Organize commands in subdirectories for logical grouping: ``` plugin-name/ └── commands/ ├── review/ │ ├── security.md # /security (plugin:plugin-name:review) │ └── style.md # /style (plugin:plugin-name:review) └── deploy/ ├── staging.md # /staging (plugin:plugin-name:deploy) └── prod.md # /prod (plugin:plugin-name:deploy) ``` **Namespace behavior:** - Subdirectory name becomes namespace - Shown as "(plugin:plugin-name:namespace)" in `/help` - Helps organize related commands - Use when plugin has 5+ commands ### Command Naming Conventions **Plugin command names should:** 1. Be descriptive and action-oriented 2. Avoid conflicts with common command names 3. Use hyphens for multi-word names 4. 
Consider prefixing with plugin name for uniqueness **Examples:** ``` Good: - /mylyn-sync (plugin-specific prefix) - /analyze-performance (descriptive action) - /docker-compose-up (clear purpose) Avoid: - /test (conflicts with common name) - /run (too generic) - /do-stuff (not descriptive) ``` ## CLAUDE_PLUGIN_ROOT Environment Variable ### Purpose `${CLAUDE_PLUGIN_ROOT}` is a special environment variable available in plugin commands that resolves to the absolute path of the plugin directory. **Why it matters:** - Enables portable paths within plugin - Allows referencing plugin files and scripts - Works across different installations - Essential for multi-file plugin operations ### Basic Usage Reference files within your plugin: ```markdown --- description: Analyze using plugin script allowed-tools: Bash(node:*), Read --- Run analysis: !`node ${CLAUDE_PLUGIN_ROOT}/scripts/analyze.js` Read template: @${CLAUDE_PLUGIN_ROOT}/templates/report.md ``` **Expands to:** ``` Run analysis: !`node /path/to/plugins/plugin-name/scripts/analyze.js` Read template: @/path/to/plugins/plugin-name/templates/report.md ``` ### Common Patterns #### 1. Executing Plugin Scripts ```markdown --- description: Run custom linter from plugin allowed-tools: Bash(node:*) --- Lint results: !`node ${CLAUDE_PLUGIN_ROOT}/bin/lint.js $1` Review the linting output and suggest fixes. ``` #### 2. Loading Configuration Files ```markdown --- description: Deploy using plugin configuration allowed-tools: Read, Bash(*) --- Configuration: @${CLAUDE_PLUGIN_ROOT}/config/deploy-config.json Deploy application using the configuration above for $1 environment. ``` #### 3. Accessing Plugin Resources ```markdown --- description: Generate report from template --- Use this template: @${CLAUDE_PLUGIN_ROOT}/templates/api-report.md Generate a report for @$1 following the template format. ``` #### 4. 
Multi-Step Plugin Workflows ```markdown --- description: Complete plugin workflow allowed-tools: Bash(*), Read --- Step 1 - Prepare: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/prepare.sh $1` Step 2 - Config: @${CLAUDE_PLUGIN_ROOT}/config/$1.json Step 3 - Execute: !`${CLAUDE_PLUGIN_ROOT}/bin/execute $1` Review results and report status. ``` ### Best Practices 1. **Always use for plugin-internal paths:** ```markdown # Good @${CLAUDE_PLUGIN_ROOT}/templates/foo.md # Bad @./templates/foo.md # Relative to current directory, not plugin ``` 2. **Validate file existence:** ```markdown --- description: Use plugin config if exists allowed-tools: Bash(test:*), Read --- !`test -f ${CLAUDE_PLUGIN_ROOT}/config.json && echo "exists" || echo "missing"` If config exists, load it: @${CLAUDE_PLUGIN_ROOT}/config.json Otherwise, use defaults... ``` 3. **Document plugin file structure:** ```markdown ``` 4. **Combine with arguments:** ```markdown Run: !`${CLAUDE_PLUGIN_ROOT}/bin/process.sh $1 $2` ``` ### Troubleshooting **Variable not expanding:** - Ensure command is loaded from plugin - Check bash execution is allowed - Verify syntax is exact: `${CLAUDE_PLUGIN_ROOT}` **File not found errors:** - Verify file exists in plugin directory - Check file path is correct relative to plugin root - Ensure file permissions allow reading/execution **Path with spaces:** - Bash commands automatically handle spaces - File references work with spaces in paths - No special quoting needed ## Plugin Command Patterns ### Pattern 1: Configuration-Based Commands Commands that load plugin-specific configuration: ```markdown --- description: Deploy using plugin settings allowed-tools: Read, Bash(*) --- Load configuration: @${CLAUDE_PLUGIN_ROOT}/deploy-config.json Deploy to $1 environment using: 1. Configuration settings above 2. Current git branch: !`git branch --show-current` 3. Application version: !`cat package.json | grep version` Execute deployment and monitor progress. 
``` **When to use:** Commands that need consistent settings across invocations ### Pattern 2: Template-Based Generation Commands that use plugin templates: ```markdown --- description: Generate documentation from template argument-hint: [component-name] --- Template: @${CLAUDE_PLUGIN_ROOT}/templates/component-docs.md Generate documentation for $1 component following the template structure. Include: - Component purpose and usage - API reference - Examples - Testing guidelines ``` **When to use:** Standardized output generation ### Pattern 3: Multi-Script Workflow Commands that orchestrate multiple plugin scripts: ```markdown --- description: Complete build and test workflow allowed-tools: Bash(*) --- Build: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/build.sh` Validate: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/validate.sh` Test: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/test.sh` Review all outputs and report: 1. Build status 2. Validation results 3. Test results 4. Recommended next steps ``` **When to use:** Complex plugin workflows with multiple steps ### Pattern 4: Environment-Aware Commands Commands that adapt to environment: ```markdown --- description: Deploy based on environment argument-hint: [dev|staging|prod] --- Environment config: @${CLAUDE_PLUGIN_ROOT}/config/$1.json Environment check: !`echo "Deploying to: $1"` Deploy application using $1 environment configuration. Verify deployment and run smoke tests. ``` **When to use:** Commands that behave differently per environment ### Pattern 5: Plugin Data Management Commands that manage plugin-specific data: ```markdown --- description: Save analysis results to plugin cache allowed-tools: Bash(*), Read, Write --- Cache directory: ${CLAUDE_PLUGIN_ROOT}/cache/ Analyze @$1 and save results to cache: !`mkdir -p ${CLAUDE_PLUGIN_ROOT}/cache && date > ${CLAUDE_PLUGIN_ROOT}/cache/last-run.txt` Store analysis for future reference and comparison. 
``` **When to use:** Commands that need persistent data storage ## Integration with Plugin Components ### Invoking Plugin Agents Commands can trigger plugin agents using the Task tool: ```markdown --- description: Deep analysis using plugin agent argument-hint: [file-path] --- Initiate deep code analysis of @$1 using the code-analyzer agent. The agent will: 1. Analyze code structure 2. Identify patterns 3. Suggest improvements 4. Generate detailed report Note: This uses the Task tool to launch the plugin's code-analyzer agent. ``` **Key points:** - Agent must be defined in plugin's `agents/` directory - Claude will automatically use Task tool to launch agent - Agent has access to same plugin resources ### Invoking Plugin Skills Commands can reference plugin skills for specialized knowledge: ```markdown --- description: API documentation with best practices argument-hint: [api-file] --- Document the API in @$1 following our API documentation standards. Use the api-docs-standards skill to ensure documentation includes: - Endpoint descriptions - Parameter specifications - Response formats - Error codes - Usage examples Note: This leverages the plugin's api-docs-standards skill for consistency. ``` **Key points:** - Skill must be defined in plugin's `skills/` directory - Mention skill by name to hint Claude should invoke it - Skills provide specialized domain knowledge ### Coordinating with Plugin Hooks Commands can be designed to work with plugin hooks: ```markdown --- description: Commit with pre-commit validation allowed-tools: Bash(git:*) --- Stage changes: !\`git add $1\` Commit changes: !\`git commit -m "$2"\` Note: This commit will trigger the plugin's pre-commit hook for validation. Review hook output for any issues. 
``` **Key points:** - Hooks execute automatically on events - Commands can prepare state for hooks - Document hook interaction in command ### Multi-Component Plugin Commands Commands that coordinate multiple plugin components: ```markdown --- description: Comprehensive code review workflow argument-hint: [file-path] --- File to review: @$1 Execute comprehensive review: 1. **Static Analysis** (via plugin scripts) !`node ${CLAUDE_PLUGIN_ROOT}/scripts/lint.js $1` 2. **Deep Review** (via plugin agent) Launch the code-reviewer agent for detailed analysis. 3. **Best Practices** (via plugin skill) Use the code-standards skill to ensure compliance. 4. **Documentation** (via plugin template) Template: @${CLAUDE_PLUGIN_ROOT}/templates/review-report.md Generate final report combining all outputs. ``` **When to use:** Complex workflows leveraging multiple plugin capabilities ## Validation Patterns ### Input Validation Commands should validate inputs before processing: ```markdown --- description: Deploy to environment with validation argument-hint: [environment] --- Validate environment: !`echo "$1" | grep -E "^(dev|staging|prod)$" || echo "INVALID"` $IF($1 in [dev, staging, prod], Deploy to $1 environment using validated configuration, ERROR: Invalid environment '$1'. Must be one of: dev, staging, prod ) ``` **Validation approaches:** 1. Bash validation using grep/test 2. Inline validation in prompt 3. 
Script-based validation ### File Existence Checks Verify required files exist: ```markdown --- description: Process configuration file argument-hint: [config-file] --- Check file: !`test -f $1 && echo "EXISTS" || echo "MISSING"` Process configuration if file exists: @$1 If file doesn't exist, explain: - Expected location - Required format - How to create it ``` ### Required Arguments Validate required arguments provided: ```markdown --- description: Create deployment with version argument-hint: [environment] [version] --- Validate inputs: !`test -n "$1" -a -n "$2" && echo "OK" || echo "MISSING"` $IF($1 AND $2, Deploy version $2 to $1 environment, ERROR: Both environment and version required. Usage: /deploy [env] [version] ) ``` ### Plugin Resource Validation Verify plugin resources available: ```markdown --- description: Run analysis with plugin tools allowed-tools: Bash(test:*) --- Validate plugin setup: - Config exists: !`test -f ${CLAUDE_PLUGIN_ROOT}/config.json && echo "✓" || echo "✗"` - Scripts exist: !`test -d ${CLAUDE_PLUGIN_ROOT}/scripts && echo "✓" || echo "✗"` - Tools available: !`test -x ${CLAUDE_PLUGIN_ROOT}/bin/analyze && echo "✓" || echo "✗"` If all checks pass, proceed with analysis. Otherwise, report missing components and installation steps. ``` ### Output Validation Validate command execution results: ```markdown --- description: Build and validate output allowed-tools: Bash(*) --- Build: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/build.sh` Validate output: - Exit code: !`echo $?` - Output exists: !`test -d dist && echo "✓" || echo "✗"` - File count: !`find dist -type f | wc -l` Report build status and any validation failures. 
``` ### Graceful Error Handling Handle errors gracefully with helpful messages: ```markdown --- description: Process file with error handling argument-hint: [file-path] --- Try processing: !`node ${CLAUDE_PLUGIN_ROOT}/scripts/process.js $1 2>&1 || echo "ERROR: $?"` If processing succeeded: - Report results - Suggest next steps If processing failed: - Explain likely causes - Provide troubleshooting steps - Suggest alternative approaches ``` ## Best Practices Summary ### Plugin Commands Should: 1. **Use ${CLAUDE_PLUGIN_ROOT} for all plugin-internal paths** - Scripts, templates, configuration, resources 2. **Validate inputs early** - Check required arguments - Verify file existence - Validate argument formats 3. **Document plugin structure** - Explain required files - Document script purposes - Clarify dependencies 4. **Integrate with plugin components** - Reference agents for complex tasks - Use skills for specialized knowledge - Coordinate with hooks when relevant 5. **Provide helpful error messages** - Explain what went wrong - Suggest how to fix - Offer alternatives 6. **Handle edge cases** - Missing files - Invalid arguments - Failed script execution - Missing dependencies 7. **Keep commands focused** - One clear purpose per command - Delegate complex logic to scripts - Use agents for multi-step workflows 8. **Test across installations** - Verify paths work everywhere - Test with different arguments - Validate error cases --- For general command development, see main SKILL.md. For command examples, see examples/ directory. ================================================ FILE: plugins/plugin-dev/skills/command-development/references/testing-strategies.md ================================================ # Command Testing Strategies Comprehensive strategies for testing slash commands before deployment and distribution. ## Overview Testing commands ensures they work correctly, handle edge cases, and provide good user experience. 
A systematic testing approach catches issues early and builds confidence in command reliability. ## Testing Levels ### Level 1: Syntax and Structure Validation **What to test:** - YAML frontmatter syntax - Markdown format - File location and naming **How to test:** ```bash # Validate YAML frontmatter head -n 20 .claude/commands/my-command.md | grep -A 10 "^---" # Check for closing frontmatter marker head -n 20 .claude/commands/my-command.md | grep -c "^---" # Should be 2 # Verify file has .md extension ls .claude/commands/*.md # Check file is in correct location test -f .claude/commands/my-command.md && echo "Found" || echo "Missing" ``` **Automated validation script:** ```bash #!/bin/bash # validate-command.sh COMMAND_FILE="$1" if [ ! -f "$COMMAND_FILE" ]; then echo "ERROR: File not found: $COMMAND_FILE" exit 1 fi # Check .md extension if [[ ! "$COMMAND_FILE" =~ \.md$ ]]; then echo "ERROR: File must have .md extension" exit 1 fi # Validate YAML frontmatter if present if head -n 1 "$COMMAND_FILE" | grep -q "^---"; then # Count frontmatter markers MARKERS=$(head -n 50 "$COMMAND_FILE" | grep -c "^---") if [ "$MARKERS" -ne 2 ]; then echo "ERROR: Invalid YAML frontmatter (need exactly 2 '---' markers)" exit 1 fi echo "✓ YAML frontmatter syntax valid" fi # Check for empty file if [ ! -s "$COMMAND_FILE" ]; then echo "ERROR: File is empty" exit 1 fi echo "✓ Command file structure valid" ``` ### Level 2: Frontmatter Field Validation **What to test:** - Field types correct - Values in valid ranges - Required fields present (if any) **Validation script:** ```bash #!/bin/bash # validate-frontmatter.sh COMMAND_FILE="$1" # Extract YAML frontmatter FRONTMATTER=$(sed -n '/^---$/,/^---$/p' "$COMMAND_FILE" | sed '1d;$d') if [ -z "$FRONTMATTER" ]; then echo "No frontmatter to validate" exit 0 fi # Check 'model' field if present if echo "$FRONTMATTER" | grep -q "^model:"; then MODEL=$(echo "$FRONTMATTER" | grep "^model:" | cut -d: -f2 | tr -d ' ') if ! 
echo "sonnet opus haiku" | grep -qw "$MODEL"; then echo "ERROR: Invalid model '$MODEL' (must be sonnet, opus, or haiku)" exit 1 fi echo "✓ Model field valid: $MODEL" fi # Check 'allowed-tools' field format if echo "$FRONTMATTER" | grep -q "^allowed-tools:"; then echo "✓ allowed-tools field present" # Could add more sophisticated validation here fi # Check 'description' length if echo "$FRONTMATTER" | grep -q "^description:"; then DESC=$(echo "$FRONTMATTER" | grep "^description:" | cut -d: -f2-) LENGTH=${#DESC} if [ "$LENGTH" -gt 80 ]; then echo "WARNING: Description length $LENGTH (recommend < 60 chars)" else echo "✓ Description length acceptable: $LENGTH chars" fi fi echo "✓ Frontmatter fields valid" ``` ### Level 3: Manual Command Invocation **What to test:** - Command appears in `/help` - Command executes without errors - Output is as expected **Test procedure:** ```bash # 1. Start Claude Code claude --debug # 2. Check command appears in help > /help # Look for your command in the list # 3. Invoke command without arguments > /my-command # Check for reasonable error or behavior # 4. Invoke with valid arguments > /my-command arg1 arg2 # Verify expected behavior # 5. Check debug logs tail -f ~/.claude/debug-logs/latest # Look for errors or warnings ``` ### Level 4: Argument Testing **What to test:** - Positional arguments work ($1, $2, etc.) 
- $ARGUMENTS captures all arguments - Missing arguments handled gracefully - Invalid arguments detected **Test matrix:** | Test Case | Command | Expected Result | |-----------|---------|-----------------| | No args | `/cmd` | Graceful handling or useful message | | One arg | `/cmd arg1` | $1 substituted correctly | | Two args | `/cmd arg1 arg2` | $1 and $2 substituted | | Extra args | `/cmd a b c d` | All captured or extras ignored appropriately | | Special chars | `/cmd "arg with spaces"` | Quotes handled correctly | | Empty arg | `/cmd ""` | Empty string handled | **Test script:** ```bash #!/bin/bash # test-command-arguments.sh COMMAND="$1" echo "Testing argument handling for /$COMMAND" echo echo "Test 1: No arguments" echo " Command: /$COMMAND" echo " Expected: [describe expected behavior]" echo " Manual test required" echo echo "Test 2: Single argument" echo " Command: /$COMMAND test-value" echo " Expected: 'test-value' appears in output" echo " Manual test required" echo echo "Test 3: Multiple arguments" echo " Command: /$COMMAND arg1 arg2 arg3" echo " Expected: All arguments used appropriately" echo " Manual test required" echo echo "Test 4: Special characters" echo " Command: /$COMMAND \"value with spaces\"" echo " Expected: Entire phrase captured" echo " Manual test required" ``` ### Level 5: File Reference Testing **What to test:** - @ syntax loads file contents - Non-existent files handled - Large files handled appropriately - Multiple file references work **Test procedure:** ```bash # Create test files echo "Test content" > /tmp/test-file.txt echo "Second file" > /tmp/test-file-2.txt # Test single file reference > /my-command /tmp/test-file.txt # Verify file content is read # Test non-existent file > /my-command /tmp/nonexistent.txt # Verify graceful error handling # Test multiple files > /my-command /tmp/test-file.txt /tmp/test-file-2.txt # Verify both files processed # Test large file dd if=/dev/zero of=/tmp/large-file.bin bs=1M count=100 > /my-command 
/tmp/large-file.bin # Verify reasonable behavior (may truncate or warn) # Cleanup rm /tmp/test-file*.txt /tmp/large-file.bin ``` ### Level 6: Bash Execution Testing **What to test:** - !` commands execute correctly - Command output included in prompt - Command failures handled - Security: only allowed commands run **Test procedure:** ```bash # Create test command with bash execution cat > .claude/commands/test-bash.md << 'EOF' --- description: Test bash execution allowed-tools: Bash(echo:*), Bash(date:*) --- Current date: !`date` Test output: !`echo "Hello from bash"` Analysis of output above... EOF # Test in Claude Code > /test-bash # Verify: # 1. Date appears correctly # 2. Echo output appears # 3. No errors in debug logs # Test with disallowed command (should fail or be blocked) cat > .claude/commands/test-forbidden.md << 'EOF' --- description: Test forbidden command allowed-tools: Bash(echo:*) --- Trying forbidden: !`ls -la /` EOF > /test-forbidden # Verify: Permission denied or appropriate error ``` ### Level 7: Integration Testing **What to test:** - Commands work with other plugin components - Commands interact correctly with each other - State management works across invocations - Workflow commands execute in sequence **Test scenarios:** **Scenario 1: Command + Hook Integration** ```bash # Setup: Command that triggers a hook # Test: Invoke command, verify hook executes # Command: .claude/commands/risky-operation.md # Hook: PreToolUse that validates the operation > /risky-operation # Verify: Hook executes and validates before command completes ``` **Scenario 2: Command Sequence** ```bash # Setup: Multi-command workflow > /workflow-init # Verify: State file created > /workflow-step2 # Verify: State file read, step 2 executes > /workflow-complete # Verify: State file cleaned up ``` **Scenario 3: Command + MCP Integration** ```bash # Setup: Command uses MCP tools # Test: Verify MCP server accessible > /mcp-command # Verify: # 1. 
MCP server starts (if stdio) # 2. Tool calls succeed # 3. Results included in output ``` ## Automated Testing Approaches ### Command Test Suite Create a test suite script: ```bash #!/bin/bash # test-commands.sh - Command test suite TEST_DIR=".claude/commands" FAILED_TESTS=0 echo "Command Test Suite" echo "==================" echo for cmd_file in "$TEST_DIR"/*.md; do cmd_name=$(basename "$cmd_file" .md) echo "Testing: $cmd_name" # Validate structure if ./validate-command.sh "$cmd_file"; then echo " ✓ Structure valid" else echo " ✗ Structure invalid" ((FAILED_TESTS++)) fi # Validate frontmatter if ./validate-frontmatter.sh "$cmd_file"; then echo " ✓ Frontmatter valid" else echo " ✗ Frontmatter invalid" ((FAILED_TESTS++)) fi echo done echo "==================" echo "Tests complete" echo "Failed: $FAILED_TESTS" exit $FAILED_TESTS ``` ### Pre-Commit Hook Validate commands before committing: ```bash #!/bin/bash # .git/hooks/pre-commit echo "Validating commands..." COMMANDS_CHANGED=$(git diff --cached --name-only | grep "\.claude/commands/.*\.md") if [ -z "$COMMANDS_CHANGED" ]; then echo "No commands changed" exit 0 fi for cmd in $COMMANDS_CHANGED; do echo "Checking: $cmd" if ! 
./scripts/validate-command.sh "$cmd"; then echo "ERROR: Command validation failed: $cmd" exit 1 fi done echo "✓ All commands valid" ``` ### Continuous Testing Test commands in CI/CD: ```yaml # .github/workflows/test-commands.yml name: Test Commands on: [push, pull_request] jobs: test: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Validate command structure run: | for cmd in .claude/commands/*.md; do echo "Testing: $cmd" ./scripts/validate-command.sh "$cmd" done - name: Validate frontmatter run: | for cmd in .claude/commands/*.md; do ./scripts/validate-frontmatter.sh "$cmd" done - name: Check for TODOs run: | if grep -r "TODO" .claude/commands/; then echo "ERROR: TODOs found in commands" exit 1 fi ``` ## Edge Case Testing ### Test Edge Cases **Empty arguments:** ```bash > /cmd "" > /cmd '' '' ``` **Special characters:** ```bash > /cmd "arg with spaces" > /cmd arg-with-dashes > /cmd arg_with_underscores > /cmd arg/with/slashes > /cmd 'arg with "quotes"' ``` **Long arguments:** ```bash > /cmd $(python -c "print('a' * 10000)") ``` **Unusual file paths:** ```bash > /cmd ./file > /cmd ../file > /cmd ~/file > /cmd "/path with spaces/file" ``` **Bash command edge cases:** ```markdown # Commands that might fail !`exit 1` !`false` !`command-that-does-not-exist` # Commands with special output !`echo ""` !`cat /dev/null` !`yes | head -n 1000000` ``` ## Performance Testing ### Response Time Testing ```bash #!/bin/bash # test-command-performance.sh COMMAND="$1" echo "Testing performance of /$COMMAND" echo for i in {1..5}; do echo "Run $i:" START=$(date +%s%N) # Invoke command (manual step - record time) echo " Invoke: /$COMMAND" echo " Start time: $START" echo " (Record end time manually)" echo done echo "Analyze results:" echo " - Average response time" echo " - Variance" echo " - Acceptable threshold: < 3 seconds for fast commands" ``` ### Resource Usage Testing ```bash # Monitor Claude Code during command execution # In terminal 1: claude --debug # In 
terminal 2: watch -n 1 'ps aux | grep claude' # Execute command and observe: # - Memory usage # - CPU usage # - Process count ``` ## User Experience Testing ### Usability Checklist - [ ] Command name is intuitive - [ ] Description is clear in `/help` - [ ] Arguments are well-documented - [ ] Error messages are helpful - [ ] Output is formatted readably - [ ] Long-running commands show progress - [ ] Results are actionable - [ ] Edge cases have good UX ### User Acceptance Testing Recruit testers: ```markdown # Testing Guide for Beta Testers ## Command: /my-new-command ### Test Scenarios 1. **Basic usage:** - Run: `/my-new-command` - Expected: [describe] - Rate clarity: 1-5 2. **With arguments:** - Run: `/my-new-command arg1 arg2` - Expected: [describe] - Rate usefulness: 1-5 3. **Error case:** - Run: `/my-new-command invalid-input` - Expected: Helpful error message - Rate error message: 1-5 ### Feedback Questions 1. Was the command easy to understand? 2. Did the output meet your expectations? 3. What would you change? 4. Would you use this command regularly? 
``` ## Testing Checklist Before releasing a command: ### Structure - [ ] File in correct location - [ ] Correct .md extension - [ ] Valid YAML frontmatter (if present) - [ ] Markdown syntax correct ### Functionality - [ ] Command appears in `/help` - [ ] Description is clear - [ ] Command executes without errors - [ ] Arguments work as expected - [ ] File references work - [ ] Bash execution works (if used) ### Edge Cases - [ ] Missing arguments handled - [ ] Invalid arguments detected - [ ] Non-existent files handled - [ ] Special characters work - [ ] Long inputs handled ### Integration - [ ] Works with other commands - [ ] Works with hooks (if applicable) - [ ] Works with MCP (if applicable) - [ ] State management works ### Quality - [ ] Performance acceptable - [ ] No security issues - [ ] Error messages helpful - [ ] Output formatted well - [ ] Documentation complete ### Distribution - [ ] Tested by others - [ ] Feedback incorporated - [ ] README updated - [ ] Examples provided ## Debugging Failed Tests ### Common Issues and Solutions **Issue: Command not appearing in /help** ```bash # Check file location ls -la .claude/commands/my-command.md # Check permissions chmod 644 .claude/commands/my-command.md # Check syntax head -n 20 .claude/commands/my-command.md # Restart Claude Code claude --debug ``` **Issue: Arguments not substituting** ```bash # Verify syntax grep '\$1' .claude/commands/my-command.md grep '\$ARGUMENTS' .claude/commands/my-command.md # Test with simple command first echo "Test: \$1 and \$2" > .claude/commands/test-args.md ``` **Issue: Bash commands not executing** ```bash # Check allowed-tools grep "allowed-tools" .claude/commands/my-command.md # Verify command syntax grep '!\`' .claude/commands/my-command.md # Test command manually date echo "test" ``` **Issue: File references not working** ```bash # Check @ syntax grep '@' .claude/commands/my-command.md # Verify file exists ls -la /path/to/referenced/file # Check permissions chmod 644 
/path/to/referenced/file ``` ## Best Practices 1. **Test early, test often**: Validate as you develop 2. **Automate validation**: Use scripts for repeatable checks 3. **Test edge cases**: Don't just test the happy path 4. **Get feedback**: Have others test before wide release 5. **Document tests**: Keep test scenarios for regression testing 6. **Monitor in production**: Watch for issues after release 7. **Iterate**: Improve based on real usage data ================================================ FILE: plugins/plugin-dev/skills/hook-development/SKILL.md ================================================ --- name: Hook Development description: This skill should be used when the user asks to "create a hook", "add a PreToolUse/PostToolUse/Stop hook", "validate tool use", "implement prompt-based hooks", "use ${CLAUDE_PLUGIN_ROOT}", "set up event-driven automation", "block dangerous commands", or mentions hook events (PreToolUse, PostToolUse, Stop, SubagentStop, SessionStart, SessionEnd, UserPromptSubmit, PreCompact, Notification). Provides comprehensive guidance for creating and implementing Claude Code plugin hooks with focus on advanced prompt-based hooks API. version: 0.1.0 --- # Hook Development for Claude Code Plugins ## Overview Hooks are event-driven automation scripts that execute in response to Claude Code events. Use hooks to validate operations, enforce policies, add context, and integrate external tools into workflows. 
**Key capabilities:** - Validate tool calls before execution (PreToolUse) - React to tool results (PostToolUse) - Enforce completion standards (Stop, SubagentStop) - Load project context (SessionStart) - Automate workflows across the development lifecycle ## Hook Types ### Prompt-Based Hooks (Recommended) Use LLM-driven decision making for context-aware validation: ```json { "type": "prompt", "prompt": "Evaluate if this tool use is appropriate: $TOOL_INPUT", "timeout": 30 } ``` **Supported events:** Stop, SubagentStop, UserPromptSubmit, PreToolUse **Benefits:** - Context-aware decisions based on natural language reasoning - Flexible evaluation logic without bash scripting - Better edge case handling - Easier to maintain and extend ### Command Hooks Execute bash commands for deterministic checks: ```json { "type": "command", "command": "bash ${CLAUDE_PLUGIN_ROOT}/scripts/validate.sh", "timeout": 60 } ``` **Use for:** - Fast deterministic validations - File system operations - External tool integrations - Performance-critical checks ## Hook Configuration Formats ### Plugin hooks.json Format **For plugin hooks** in `hooks/hooks.json`, use wrapper format: ```json { "description": "Brief explanation of hooks (optional)", "hooks": { "PreToolUse": [...], "Stop": [...], "SessionStart": [...] } } ``` **Key points:** - `description` field is optional - `hooks` field is required wrapper containing actual hook events - This is the **plugin-specific format** **Example:** ```json { "description": "Validation hooks for code quality", "hooks": { "PreToolUse": [ { "matcher": "Write", "hooks": [ { "type": "command", "command": "${CLAUDE_PLUGIN_ROOT}/hooks/validate.sh" } ] } ] } } ``` ### Settings Format (Direct) **For user settings** in `.claude/settings.json`, use direct format: ```json { "PreToolUse": [...], "Stop": [...], "SessionStart": [...] 
} ``` **Key points:** - No wrapper - events directly at top level - No description field - This is the **settings format** **Important:** The examples below show the hook event structure that goes inside either format. For plugin hooks.json, wrap these in `{"hooks": {...}}`. ## Hook Events ### PreToolUse Execute before any tool runs. Use to approve, deny, or modify tool calls. **Example (prompt-based):** ```json { "PreToolUse": [ { "matcher": "Write|Edit", "hooks": [ { "type": "prompt", "prompt": "Validate file write safety. Check: system paths, credentials, path traversal, sensitive content. Return 'approve' or 'deny'." } ] } ] } ``` **Output for PreToolUse:** ```json { "hookSpecificOutput": { "permissionDecision": "allow|deny|ask", "updatedInput": {"field": "modified_value"} }, "systemMessage": "Explanation for Claude" } ``` ### PostToolUse Execute after tool completes. Use to react to results, provide feedback, or log. **Example:** ```json { "PostToolUse": [ { "matcher": "Edit", "hooks": [ { "type": "prompt", "prompt": "Analyze edit result for potential issues: syntax errors, security vulnerabilities, breaking changes. Provide feedback." } ] } ] } ``` **Output behavior:** - Exit 0: stdout shown in transcript - Exit 2: stderr fed back to Claude - systemMessage included in context ### Stop Execute when main agent considers stopping. Use to validate completeness. **Example:** ```json { "Stop": [ { "matcher": "*", "hooks": [ { "type": "prompt", "prompt": "Verify task completion: tests run, build succeeded, questions answered. Return 'approve' to stop or 'block' with reason to continue." } ] } ] } ``` **Decision output:** ```json { "decision": "approve|block", "reason": "Explanation", "systemMessage": "Additional context" } ``` ### SubagentStop Execute when subagent considers stopping. Use to ensure subagent completed its task. Similar to Stop hook, but for subagents. ### UserPromptSubmit Execute when user submits a prompt. 
Use to add context, validate, or block prompts. **Example:** ```json { "UserPromptSubmit": [ { "matcher": "*", "hooks": [ { "type": "prompt", "prompt": "Check if prompt requires security guidance. If discussing auth, permissions, or API security, return relevant warnings." } ] } ] } ``` ### SessionStart Execute when Claude Code session begins. Use to load context and set environment. **Example:** ```json { "SessionStart": [ { "matcher": "*", "hooks": [ { "type": "command", "command": "bash ${CLAUDE_PLUGIN_ROOT}/scripts/load-context.sh" } ] } ] } ``` **Special capability:** Persist environment variables using `$CLAUDE_ENV_FILE`: ```bash echo "export PROJECT_TYPE=nodejs" >> "$CLAUDE_ENV_FILE" ``` See `examples/load-context.sh` for complete example. ### SessionEnd Execute when session ends. Use for cleanup, logging, and state preservation. ### PreCompact Execute before context compaction. Use to add critical information to preserve. ### Notification Execute when Claude sends notifications. Use to react to user notifications. ## Hook Output Format ### Standard Output (All Hooks) ```json { "continue": true, "suppressOutput": false, "systemMessage": "Message for Claude" } ``` - `continue`: If false, halt processing (default true) - `suppressOutput`: Hide output from transcript (default false) - `systemMessage`: Message shown to Claude ### Exit Codes - `0` - Success (stdout shown in transcript) - `2` - Blocking error (stderr fed back to Claude) - Other - Non-blocking error ## Hook Input Format All hooks receive JSON via stdin with common fields: ```json { "session_id": "abc123", "transcript_path": "/path/to/transcript.txt", "cwd": "/current/working/dir", "permission_mode": "ask|allow", "hook_event_name": "PreToolUse" } ``` **Event-specific fields:** - **PreToolUse/PostToolUse:** `tool_name`, `tool_input`, `tool_result` - **UserPromptSubmit:** `user_prompt` - **Stop/SubagentStop:** `reason` Access fields in prompts using `$TOOL_INPUT`, `$TOOL_RESULT`, `$USER_PROMPT`, etc. 
## Environment Variables Available in all command hooks: - `$CLAUDE_PROJECT_DIR` - Project root path - `$CLAUDE_PLUGIN_ROOT` - Plugin directory (use for portable paths) - `$CLAUDE_ENV_FILE` - SessionStart only: persist env vars here - `$CLAUDE_CODE_REMOTE` - Set if running in remote context **Always use ${CLAUDE_PLUGIN_ROOT} in hook commands for portability:** ```json { "type": "command", "command": "bash ${CLAUDE_PLUGIN_ROOT}/scripts/validate.sh" } ``` ## Plugin Hook Configuration In plugins, define hooks in `hooks/hooks.json`: ```json { "PreToolUse": [ { "matcher": "Write|Edit", "hooks": [ { "type": "prompt", "prompt": "Validate file write safety" } ] } ], "Stop": [ { "matcher": "*", "hooks": [ { "type": "prompt", "prompt": "Verify task completion" } ] } ], "SessionStart": [ { "matcher": "*", "hooks": [ { "type": "command", "command": "bash ${CLAUDE_PLUGIN_ROOT}/scripts/load-context.sh", "timeout": 10 } ] } ] } ``` Plugin hooks merge with user's hooks and run in parallel. ## Matchers ### Tool Name Matching **Exact match:** ```json "matcher": "Write" ``` **Multiple tools:** ```json "matcher": "Read|Write|Edit" ``` **Wildcard (all tools):** ```json "matcher": "*" ``` **Regex patterns:** ```json "matcher": "mcp__.*__delete.*" // All MCP delete tools ``` **Note:** Matchers are case-sensitive. ### Common Patterns ```json // All MCP tools "matcher": "mcp__.*" // Specific plugin's MCP tools "matcher": "mcp__plugin_asana_.*" // All file operations "matcher": "Read|Write|Edit" // Bash commands only "matcher": "Bash" ``` ## Security Best Practices ### Input Validation Always validate inputs in command hooks: ```bash #!/bin/bash set -euo pipefail input=$(cat) tool_name=$(echo "$input" | jq -r '.tool_name') # Validate tool name format if [[ ! 
"$tool_name" =~ ^[a-zA-Z0-9_]+$ ]]; then echo '{"decision": "deny", "reason": "Invalid tool name"}' >&2 exit 2 fi ``` ### Path Safety Check for path traversal and sensitive files: ```bash file_path=$(echo "$input" | jq -r '.tool_input.file_path') # Deny path traversal if [[ "$file_path" == *".."* ]]; then echo '{"decision": "deny", "reason": "Path traversal detected"}' >&2 exit 2 fi # Deny sensitive files if [[ "$file_path" == *".env"* ]]; then echo '{"decision": "deny", "reason": "Sensitive file"}' >&2 exit 2 fi ``` See `examples/validate-write.sh` and `examples/validate-bash.sh` for complete examples. ### Quote All Variables ```bash # GOOD: Quoted echo "$file_path" cd "$CLAUDE_PROJECT_DIR" # BAD: Unquoted (injection risk) echo $file_path cd $CLAUDE_PROJECT_DIR ``` ### Set Appropriate Timeouts ```json { "type": "command", "command": "bash script.sh", "timeout": 10 } ``` **Defaults:** Command hooks (60s), Prompt hooks (30s) ## Performance Considerations ### Parallel Execution All matching hooks run **in parallel**: ```json { "PreToolUse": [ { "matcher": "Write", "hooks": [ {"type": "command", "command": "check1.sh"}, // Parallel {"type": "command", "command": "check2.sh"}, // Parallel {"type": "prompt", "prompt": "Validate..."} // Parallel ] } ] } ``` **Design implications:** - Hooks don't see each other's output - Non-deterministic ordering - Design for independence ### Optimization 1. Use command hooks for quick deterministic checks 2. Use prompt hooks for complex reasoning 3. Cache validation results in temp files 4. Minimize I/O in hot paths ## Temporarily Active Hooks Create hooks that activate conditionally by checking for a flag file or configuration: **Pattern: Flag file activation** ```bash #!/bin/bash # Only active when flag file exists FLAG_FILE="$CLAUDE_PROJECT_DIR/.enable-strict-validation" if [ ! -f "$FLAG_FILE" ]; then # Flag not present, skip validation exit 0 fi # Flag present, run validation input=$(cat) # ... validation logic ... 
``` **Pattern: Configuration-based activation** ```bash #!/bin/bash # Check configuration for activation CONFIG_FILE="$CLAUDE_PROJECT_DIR/.claude/plugin-config.json" if [ -f "$CONFIG_FILE" ]; then enabled=$(jq -r '.strictMode // false' "$CONFIG_FILE") if [ "$enabled" != "true" ]; then exit 0 # Not enabled, skip fi fi # Enabled, run hook logic input=$(cat) # ... hook logic ... ``` **Use cases:** - Enable strict validation only when needed - Temporary debugging hooks - Project-specific hook behavior - Feature flags for hooks **Best practice:** Document activation mechanism in plugin README so users know how to enable/disable temporary hooks. ## Hook Lifecycle and Limitations ### Hooks Load at Session Start **Important:** Hooks are loaded when Claude Code session starts. Changes to hook configuration require restarting Claude Code. **Cannot hot-swap hooks:** - Editing `hooks/hooks.json` won't affect current session - Adding new hook scripts won't be recognized - Changing hook commands/prompts won't update - Must restart Claude Code: exit and run `claude` again **To test hook changes:** 1. Edit hook configuration or scripts 2. Exit Claude Code session 3. Restart: `claude` or `cc` 4. New hook configuration loads 5. Test hooks with `claude --debug` ### Hook Validation at Startup Hooks are validated when Claude Code starts: - Invalid JSON in hooks.json causes loading failure - Missing scripts cause warnings - Syntax errors reported in debug mode Use `/hooks` command to review loaded hooks in current session. ## Debugging Hooks ### Enable Debug Mode ```bash claude --debug ``` Look for hook registration, execution logs, input/output JSON, and timing information. ### Test Hook Scripts Test command hooks directly: ```bash echo '{"tool_name": "Write", "tool_input": {"file_path": "/test"}}' | \ bash ${CLAUDE_PLUGIN_ROOT}/scripts/validate.sh echo "Exit code: $?" 
``` ### Validate JSON Output Ensure hooks output valid JSON: ```bash output=$(./your-hook.sh < test-input.json) echo "$output" | jq . ``` ## Quick Reference ### Hook Events Summary | Event | When | Use For | |-------|------|---------| | PreToolUse | Before tool | Validation, modification | | PostToolUse | After tool | Feedback, logging | | UserPromptSubmit | User input | Context, validation | | Stop | Agent stopping | Completeness check | | SubagentStop | Subagent done | Task validation | | SessionStart | Session begins | Context loading | | SessionEnd | Session ends | Cleanup, logging | | PreCompact | Before compact | Preserve context | | Notification | User notified | Logging, reactions | ### Best Practices **DO:** - ✅ Use prompt-based hooks for complex logic - ✅ Use ${CLAUDE_PLUGIN_ROOT} for portability - ✅ Validate all inputs in command hooks - ✅ Quote all bash variables - ✅ Set appropriate timeouts - ✅ Return structured JSON output - ✅ Test hooks thoroughly **DON'T:** - ❌ Use hardcoded paths - ❌ Trust user input without validation - ❌ Create long-running hooks - ❌ Rely on hook execution order - ❌ Modify global state unpredictably - ❌ Log sensitive information ## Additional Resources ### Reference Files For detailed patterns and advanced techniques, consult: - **`references/patterns.md`** - Common hook patterns (8+ proven patterns) - **`references/migration.md`** - Migrating from basic to advanced hooks - **`references/advanced.md`** - Advanced use cases and techniques ### Example Hook Scripts Working examples in `examples/`: - **`validate-write.sh`** - File write validation example - **`validate-bash.sh`** - Bash command validation example - **`load-context.sh`** - SessionStart context loading example ### Utility Scripts Development tools in `scripts/`: - **`validate-hook-schema.sh`** - Validate hooks.json structure and syntax - **`test-hook.sh`** - Test hooks with sample input before deployment - **`hook-linter.sh`** - Check hook scripts for common issues and 
best practices ### External Resources - **Official Docs**: https://docs.claude.com/en/docs/claude-code/hooks - **Examples**: See security-guidance plugin in marketplace - **Testing**: Use `claude --debug` for detailed logs - **Validation**: Use `jq` to validate hook JSON output ## Implementation Workflow To implement hooks in a plugin: 1. Identify events to hook into (PreToolUse, Stop, SessionStart, etc.) 2. Decide between prompt-based (flexible) or command (deterministic) hooks 3. Write hook configuration in `hooks/hooks.json` 4. For command hooks, create hook scripts 5. Use ${CLAUDE_PLUGIN_ROOT} for all file references 6. Validate configuration with `scripts/validate-hook-schema.sh hooks/hooks.json` 7. Test hooks with `scripts/test-hook.sh` before deployment 8. Test in Claude Code with `claude --debug` 9. Document hooks in plugin README Focus on prompt-based hooks for most use cases. Reserve command hooks for performance-critical or deterministic checks. ================================================ FILE: plugins/plugin-dev/skills/hook-development/examples/load-context.sh ================================================ #!/bin/bash # Example SessionStart hook for loading project context # This script detects project type and sets environment variables set -euo pipefail # Navigate to project directory cd "$CLAUDE_PROJECT_DIR" || exit 1 echo "Loading project context..." 
# Detect project type and set environment if [ -f "package.json" ]; then echo "📦 Node.js project detected" echo "export PROJECT_TYPE=nodejs" >> "$CLAUDE_ENV_FILE" # Check if TypeScript if [ -f "tsconfig.json" ]; then echo "export USES_TYPESCRIPT=true" >> "$CLAUDE_ENV_FILE" fi elif [ -f "Cargo.toml" ]; then echo "🦀 Rust project detected" echo "export PROJECT_TYPE=rust" >> "$CLAUDE_ENV_FILE" elif [ -f "go.mod" ]; then echo "🐹 Go project detected" echo "export PROJECT_TYPE=go" >> "$CLAUDE_ENV_FILE" elif [ -f "pyproject.toml" ] || [ -f "setup.py" ]; then echo "🐍 Python project detected" echo "export PROJECT_TYPE=python" >> "$CLAUDE_ENV_FILE" elif [ -f "pom.xml" ]; then echo "☕ Java (Maven) project detected" echo "export PROJECT_TYPE=java" >> "$CLAUDE_ENV_FILE" echo "export BUILD_SYSTEM=maven" >> "$CLAUDE_ENV_FILE" elif [ -f "build.gradle" ] || [ -f "build.gradle.kts" ]; then echo "☕ Java/Kotlin (Gradle) project detected" echo "export PROJECT_TYPE=java" >> "$CLAUDE_ENV_FILE" echo "export BUILD_SYSTEM=gradle" >> "$CLAUDE_ENV_FILE" else echo "❓ Unknown project type" echo "export PROJECT_TYPE=unknown" >> "$CLAUDE_ENV_FILE" fi # Check for CI configuration if [ -f ".github/workflows" ] || [ -f ".gitlab-ci.yml" ] || [ -f ".circleci/config.yml" ]; then echo "export HAS_CI=true" >> "$CLAUDE_ENV_FILE" fi echo "Project context loaded successfully" exit 0 ================================================ FILE: plugins/plugin-dev/skills/hook-development/examples/validate-bash.sh ================================================ #!/bin/bash # Example PreToolUse hook for validating Bash commands # This script demonstrates bash command validation patterns set -euo pipefail # Read input from stdin input=$(cat) # Extract command command=$(echo "$input" | jq -r '.tool_input.command // empty') # Validate command exists if [ -z "$command" ]; then echo '{"continue": true}' # No command to validate exit 0 fi # Check for obviously safe commands (quick approval) if [[ "$command" =~ 
^(ls|pwd|echo|date|whoami)(\s|$) ]]; then
  exit 0
fi

# Check for destructive operations
if [[ "$command" == *"rm -rf"* ]] || [[ "$command" == *"rm -fr"* ]]; then
  echo '{"hookSpecificOutput": {"permissionDecision": "deny"}, "systemMessage": "Dangerous command detected: rm -rf"}' >&2
  exit 2
fi

# Check for other dangerous commands
if [[ "$command" == *"dd if="* ]] || [[ "$command" == *"mkfs"* ]] || [[ "$command" == *"> /dev/"* ]]; then
  echo '{"hookSpecificOutput": {"permissionDecision": "deny"}, "systemMessage": "Dangerous system operation detected"}' >&2
  exit 2
fi

# Check for privilege escalation.
# Anchored regex instead of the former globs sudo*/su*: the glob su* also
# matched harmless commands such as "sum" or "survey". This only fires when
# the invoked command itself is su, sudo, or sudoedit.
if [[ "$command" =~ ^(sudo|sudoedit|su)([[:space:]]|$) ]]; then
  echo '{"hookSpecificOutput": {"permissionDecision": "ask"}, "systemMessage": "Command requires elevated privileges"}' >&2
  exit 2
fi

# Approve the operation
exit 0



================================================
FILE: plugins/plugin-dev/skills/hook-development/examples/validate-write.sh
================================================
#!/bin/bash
# Example PreToolUse hook for validating Write/Edit operations
# This script demonstrates file write validation patterns

set -euo pipefail

# Read input from stdin
input=$(cat)

# Extract file path and content
file_path=$(echo "$input" | jq -r '.tool_input.file_path // empty')

# Validate path exists
if [ -z "$file_path" ]; then
  echo '{"continue": true}' # No path to validate
  exit 0
fi

# Check for path traversal
if [[ "$file_path" == *".."* ]]; then
  echo '{"hookSpecificOutput": {"permissionDecision": "deny"}, "systemMessage": "Path traversal detected in: '"$file_path"'"}' >&2
  exit 2
fi

# Check for system directories
if [[ "$file_path" == /etc/* ]] || [[ "$file_path" == /sys/* ]] || [[ "$file_path" == /usr/* ]]; then
  echo '{"hookSpecificOutput": {"permissionDecision": "deny"}, "systemMessage": "Cannot write to system directory: '"$file_path"'"}' >&2
  exit 2
fi

# Check for sensitive files
if [[ "$file_path" == *.env ]] || [[ "$file_path" == *secret* ]] || [[ "$file_path" ==
*credentials* ]]; then echo '{"hookSpecificOutput": {"permissionDecision": "ask"}, "systemMessage": "Writing to potentially sensitive file: '"$file_path"'"}' >&2 exit 2 fi # Approve the operation exit 0 ================================================ FILE: plugins/plugin-dev/skills/hook-development/references/advanced.md ================================================ # Advanced Hook Use Cases This reference covers advanced hook patterns and techniques for sophisticated automation workflows. ## Multi-Stage Validation Combine command and prompt hooks for layered validation: ```json { "PreToolUse": [ { "matcher": "Bash", "hooks": [ { "type": "command", "command": "bash ${CLAUDE_PLUGIN_ROOT}/scripts/quick-check.sh", "timeout": 5 }, { "type": "prompt", "prompt": "Deep analysis of bash command: $TOOL_INPUT", "timeout": 15 } ] } ] } ``` **Use case:** Fast deterministic checks followed by intelligent analysis **Example quick-check.sh:** ```bash #!/bin/bash input=$(cat) command=$(echo "$input" | jq -r '.tool_input.command') # Immediate approval for safe commands if [[ "$command" =~ ^(ls|pwd|echo|date|whoami)$ ]]; then exit 0 fi # Let prompt hook handle complex cases exit 0 ``` The command hook quickly approves obviously safe commands, while the prompt hook analyzes everything else. ## Conditional Hook Execution Execute hooks based on environment or context: ```bash #!/bin/bash # Only run in CI environment if [ -z "$CI" ]; then echo '{"continue": true}' # Skip in non-CI exit 0 fi # Run validation logic in CI input=$(cat) # ... validation code ... ``` **Use cases:** - Different behavior in CI vs local development - Project-specific validation - User-specific rules **Example: Skip certain checks for trusted users:** ```bash #!/bin/bash # Skip detailed checks for admin users if [ "$USER" = "admin" ]; then exit 0 fi # Full validation for other users input=$(cat) # ... validation code ... 
```

## Hook Chaining via State

Share state between hooks using temporary files:

```bash
# Hook 1: Analyze and save state
#!/bin/bash
input=$(cat)
command=$(echo "$input" | jq -r '.tool_input.command')

# Analyze command
risk_level=$(calculate_risk "$command")
echo "$risk_level" > /tmp/hook-state-$$
exit 0
```

```bash
# Hook 2: Use saved state
#!/bin/bash
risk_level=$(cat /tmp/hook-state-$$ 2>/dev/null || echo "unknown")

if [ "$risk_level" = "high" ]; then
  echo "High risk operation detected" >&2
  exit 2
fi
```

**Important:** This only works for sequential hook events (e.g., PreToolUse then PostToolUse), not parallel hooks. Also be aware that `$$` expands to each script's *own* process ID, so two separately invoked hook scripts will compute different file paths; in practice, derive the state-file name from something stable across both hooks (for example, a session identifier) rather than `$$`.

## Dynamic Hook Configuration

Modify hook behavior based on project configuration:

```bash
#!/bin/bash
cd "$CLAUDE_PROJECT_DIR" || exit 1

# Read project-specific config
if [ -f ".claude-hooks-config.json" ]; then
  strict_mode=$(jq -r '.strict_mode' .claude-hooks-config.json)

  if [ "$strict_mode" = "true" ]; then
    # Apply strict validation
    # ...
  else
    # Apply lenient validation
    # ...
  fi
fi
```

**Example .claude-hooks-config.json:**

```json
{
  "strict_mode": true,
  "allowed_commands": ["ls", "pwd", "grep"],
  "forbidden_paths": ["/etc", "/sys"]
}
```

## Context-Aware Prompt Hooks

Use transcript and session context for intelligent decisions:

```json
{
  "Stop": [
    {
      "matcher": "*",
      "hooks": [
        {
          "type": "prompt",
          "prompt": "Review the full transcript at $TRANSCRIPT_PATH. Check: 1) Were tests run after code changes? 2) Did the build succeed? 3) Were all user questions answered? 4) Is there any unfinished work? Return 'approve' only if everything is complete."
        }
      ]
    }
  ]
}
```

The LLM can read the transcript file and make context-aware decisions.
## Performance Optimization ### Caching Validation Results ```bash #!/bin/bash input=$(cat) file_path=$(echo "$input" | jq -r '.tool_input.file_path') cache_key=$(echo -n "$file_path" | md5sum | cut -d' ' -f1) cache_file="/tmp/hook-cache-$cache_key" # Check cache if [ -f "$cache_file" ]; then cache_age=$(($(date +%s) - $(stat -f%m "$cache_file" 2>/dev/null || stat -c%Y "$cache_file"))) if [ "$cache_age" -lt 300 ]; then # 5 minute cache cat "$cache_file" exit 0 fi fi # Perform validation result='{"decision": "approve"}' # Cache result echo "$result" > "$cache_file" echo "$result" ``` ### Parallel Execution Optimization Since hooks run in parallel, design them to be independent: ```json { "PreToolUse": [ { "matcher": "Write", "hooks": [ { "type": "command", "command": "bash check-size.sh", // Independent "timeout": 2 }, { "type": "command", "command": "bash check-path.sh", // Independent "timeout": 2 }, { "type": "prompt", "prompt": "Check content safety", // Independent "timeout": 10 } ] } ] } ``` All three hooks run simultaneously, reducing total latency. 
## Cross-Event Workflows Coordinate hooks across different events: **SessionStart - Set up tracking:** ```bash #!/bin/bash # Initialize session tracking echo "0" > /tmp/test-count-$$ echo "0" > /tmp/build-count-$$ ``` **PostToolUse - Track events:** ```bash #!/bin/bash input=$(cat) tool_name=$(echo "$input" | jq -r '.tool_name') if [ "$tool_name" = "Bash" ]; then command=$(echo "$input" | jq -r '.tool_result') if [[ "$command" == *"test"* ]]; then count=$(cat /tmp/test-count-$$ 2>/dev/null || echo "0") echo $((count + 1)) > /tmp/test-count-$$ fi fi ``` **Stop - Verify based on tracking:** ```bash #!/bin/bash test_count=$(cat /tmp/test-count-$$ 2>/dev/null || echo "0") if [ "$test_count" -eq 0 ]; then echo '{"decision": "block", "reason": "No tests were run"}' >&2 exit 2 fi ``` ## Integration with External Systems ### Slack Notifications ```bash #!/bin/bash input=$(cat) tool_name=$(echo "$input" | jq -r '.tool_name') decision="blocked" # Send notification to Slack curl -X POST "$SLACK_WEBHOOK" \ -H 'Content-Type: application/json' \ -d "{\"text\": \"Hook ${decision} ${tool_name} operation\"}" \ 2>/dev/null echo '{"decision": "deny"}' >&2 exit 2 ``` ### Database Logging ```bash #!/bin/bash input=$(cat) # Log to database psql "$DATABASE_URL" -c "INSERT INTO hook_logs (event, data) VALUES ('PreToolUse', '$input')" \ 2>/dev/null exit 0 ``` ### Metrics Collection ```bash #!/bin/bash input=$(cat) tool_name=$(echo "$input" | jq -r '.tool_name') # Send metrics to monitoring system echo "hook.pretooluse.${tool_name}:1|c" | nc -u -w1 statsd.local 8125 exit 0 ``` ## Security Patterns ### Rate Limiting ```bash #!/bin/bash input=$(cat) command=$(echo "$input" | jq -r '.tool_input.command') # Track command frequency rate_file="/tmp/hook-rate-$$" current_minute=$(date +%Y%m%d%H%M) if [ -f "$rate_file" ]; then last_minute=$(head -1 "$rate_file") count=$(tail -1 "$rate_file") if [ "$current_minute" = "$last_minute" ]; then if [ "$count" -gt 10 ]; then echo '{"decision": "deny", 
"reason": "Rate limit exceeded"}' >&2 exit 2 fi count=$((count + 1)) else count=1 fi else count=1 fi echo "$current_minute" > "$rate_file" echo "$count" >> "$rate_file" exit 0 ``` ### Audit Logging ```bash #!/bin/bash input=$(cat) tool_name=$(echo "$input" | jq -r '.tool_name') timestamp=$(date -Iseconds) # Append to audit log echo "$timestamp | $USER | $tool_name | $input" >> ~/.claude/audit.log exit 0 ``` ### Secret Detection ```bash #!/bin/bash input=$(cat) content=$(echo "$input" | jq -r '.tool_input.content') # Check for common secret patterns if echo "$content" | grep -qE "(api[_-]?key|password|secret|token).{0,20}['\"]?[A-Za-z0-9]{20,}"; then echo '{"decision": "deny", "reason": "Potential secret detected in content"}' >&2 exit 2 fi exit 0 ``` ## Testing Advanced Hooks ### Unit Testing Hook Scripts ```bash # test-hook.sh #!/bin/bash # Test 1: Approve safe command result=$(echo '{"tool_input": {"command": "ls"}}' | bash validate-bash.sh) if [ $? -eq 0 ]; then echo "✓ Test 1 passed" else echo "✗ Test 1 failed" fi # Test 2: Block dangerous command result=$(echo '{"tool_input": {"command": "rm -rf /"}}' | bash validate-bash.sh) if [ $? -eq 2 ]; then echo "✓ Test 2 passed" else echo "✗ Test 2 failed" fi ``` ### Integration Testing Create test scenarios that exercise the full hook workflow: ```bash # integration-test.sh #!/bin/bash # Set up test environment export CLAUDE_PROJECT_DIR="/tmp/test-project" export CLAUDE_PLUGIN_ROOT="$(pwd)" mkdir -p "$CLAUDE_PROJECT_DIR" # Test SessionStart hook echo '{}' | bash hooks/session-start.sh if [ -f "/tmp/session-initialized" ]; then echo "✓ SessionStart hook works" else echo "✗ SessionStart hook failed" fi # Clean up rm -rf "$CLAUDE_PROJECT_DIR" ``` ## Best Practices for Advanced Hooks 1. **Keep hooks independent**: Don't rely on execution order 2. **Use timeouts**: Set appropriate limits for each hook type 3. **Handle errors gracefully**: Provide clear error messages 4. 
**Document complexity**: Explain advanced patterns in README 5. **Test thoroughly**: Cover edge cases and failure modes 6. **Monitor performance**: Track hook execution time 7. **Version configuration**: Use version control for hook configs 8. **Provide escape hatches**: Allow users to bypass hooks when needed ## Common Pitfalls ### ❌ Assuming Hook Order ```bash # BAD: Assumes hooks run in specific order # Hook 1 saves state, Hook 2 reads it # This can fail because hooks run in parallel! ``` ### ❌ Long-Running Hooks ```bash # BAD: Hook takes 2 minutes to run sleep 120 # This will timeout and block the workflow ``` ### ❌ Uncaught Exceptions ```bash # BAD: Script crashes on unexpected input file_path=$(echo "$input" | jq -r '.tool_input.file_path') cat "$file_path" # Fails if file doesn't exist ``` ### ✅ Proper Error Handling ```bash # GOOD: Handles errors gracefully file_path=$(echo "$input" | jq -r '.tool_input.file_path') if [ ! -f "$file_path" ]; then echo '{"continue": true, "systemMessage": "File not found, skipping check"}' >&2 exit 0 fi ``` ## Conclusion Advanced hook patterns enable sophisticated automation while maintaining reliability and performance. Use these techniques when basic hooks are insufficient, but always prioritize simplicity and maintainability. ================================================ FILE: plugins/plugin-dev/skills/hook-development/references/migration.md ================================================ # Migrating from Basic to Advanced Hooks This guide shows how to migrate from basic command hooks to advanced prompt-based hooks for better maintainability and flexibility. ## Why Migrate? 
Prompt-based hooks offer several advantages: - **Natural language reasoning**: LLM understands context and intent - **Better edge case handling**: Adapts to unexpected scenarios - **No bash scripting required**: Simpler to write and maintain - **More flexible validation**: Can handle complex logic without coding ## Migration Example: Bash Command Validation ### Before (Basic Command Hook) **Configuration:** ```json { "PreToolUse": [ { "matcher": "Bash", "hooks": [ { "type": "command", "command": "bash validate-bash.sh" } ] } ] } ``` **Script (validate-bash.sh):** ```bash #!/bin/bash input=$(cat) command=$(echo "$input" | jq -r '.tool_input.command') # Hard-coded validation logic if [[ "$command" == *"rm -rf"* ]]; then echo "Dangerous command detected" >&2 exit 2 fi ``` **Problems:** - Only checks for exact "rm -rf" pattern - Doesn't catch variations like `rm -fr` or `rm -r -f` - Misses other dangerous commands (`dd`, `mkfs`, etc.) - No context awareness - Requires bash scripting knowledge ### After (Advanced Prompt Hook) **Configuration:** ```json { "PreToolUse": [ { "matcher": "Bash", "hooks": [ { "type": "prompt", "prompt": "Command: $TOOL_INPUT.command. Analyze for: 1) Destructive operations (rm -rf, dd, mkfs, etc) 2) Privilege escalation (sudo) 3) Network operations without user consent. 
Return 'approve' or 'deny' with explanation.", "timeout": 15 } ] } ] } ``` **Benefits:** - Catches all variations and patterns - Understands intent, not just literal strings - No script file needed - Easy to extend with new criteria - Context-aware decisions - Natural language explanation in denial ## Migration Example: File Write Validation ### Before (Basic Command Hook) **Configuration:** ```json { "PreToolUse": [ { "matcher": "Write", "hooks": [ { "type": "command", "command": "bash validate-write.sh" } ] } ] } ``` **Script (validate-write.sh):** ```bash #!/bin/bash input=$(cat) file_path=$(echo "$input" | jq -r '.tool_input.file_path') # Check for path traversal if [[ "$file_path" == *".."* ]]; then echo '{"decision": "deny", "reason": "Path traversal detected"}' >&2 exit 2 fi # Check for system paths if [[ "$file_path" == "/etc/"* ]] || [[ "$file_path" == "/sys/"* ]]; then echo '{"decision": "deny", "reason": "System file"}' >&2 exit 2 fi ``` **Problems:** - Hard-coded path patterns - Doesn't understand symlinks - Missing edge cases (e.g., `/etc` vs `/etc/`) - No consideration of file content ### After (Advanced Prompt Hook) **Configuration:** ```json { "PreToolUse": [ { "matcher": "Write|Edit", "hooks": [ { "type": "prompt", "prompt": "File path: $TOOL_INPUT.file_path. Content preview: $TOOL_INPUT.content (first 200 chars). Verify: 1) Not system directories (/etc, /sys, /usr) 2) Not credentials (.env, tokens, secrets) 3) No path traversal 4) Content doesn't expose secrets. Return 'approve' or 'deny'." } ] } ] } ``` **Benefits:** - Context-aware (considers content too) - Handles symlinks and edge cases - Natural understanding of "system directories" - Can detect secrets in content - Easy to extend criteria ## When to Keep Command Hooks Command hooks still have their place: ### 1. 
Deterministic Performance Checks ```bash #!/bin/bash # Check file size quickly file_path=$(echo "$input" | jq -r '.tool_input.file_path') size=$(stat -f%z "$file_path" 2>/dev/null || stat -c%s "$file_path" 2>/dev/null) if [ "$size" -gt 10000000 ]; then echo '{"decision": "deny", "reason": "File too large"}' >&2 exit 2 fi ``` **Use command hooks when:** Validation is purely mathematical or deterministic. ### 2. External Tool Integration ```bash #!/bin/bash # Run security scanner file_path=$(echo "$input" | jq -r '.tool_input.file_path') scan_result=$(security-scanner "$file_path") if [ "$?" -ne 0 ]; then echo "Security scan failed: $scan_result" >&2 exit 2 fi ``` **Use command hooks when:** Integrating with external tools that provide yes/no answers. ### 3. Very Fast Checks (< 50ms) ```bash #!/bin/bash # Quick regex check command=$(echo "$input" | jq -r '.tool_input.command') if [[ "$command" =~ ^(ls|pwd|echo)$ ]]; then exit 0 # Safe commands fi ``` **Use command hooks when:** Performance is critical and logic is simple. ## Hybrid Approach Combine both for multi-stage validation: ```json { "PreToolUse": [ { "matcher": "Bash", "hooks": [ { "type": "command", "command": "bash ${CLAUDE_PLUGIN_ROOT}/scripts/quick-check.sh", "timeout": 5 }, { "type": "prompt", "prompt": "Deep analysis of bash command: $TOOL_INPUT", "timeout": 15 } ] } ] } ``` The command hook does fast deterministic checks, while the prompt hook handles complex reasoning. ## Migration Checklist When migrating hooks: - [ ] Identify the validation logic in the command hook - [ ] Convert hard-coded patterns to natural language criteria - [ ] Test with edge cases the old hook missed - [ ] Verify LLM understands the intent - [ ] Set appropriate timeout (usually 15-30s for prompt hooks) - [ ] Document the new hook in README - [ ] Remove or archive old script files ## Migration Tips 1. **Start with one hook**: Don't migrate everything at once 2. 
**Test thoroughly**: Verify prompt hook catches what command hook caught 3. **Look for improvements**: Use migration as opportunity to enhance validation 4. **Keep scripts for reference**: Archive old scripts in case you need to reference the logic 5. **Document reasoning**: Explain why prompt hook is better in README ## Complete Migration Example ### Original Plugin Structure ``` my-plugin/ ├── .claude-plugin/plugin.json ├── hooks/hooks.json └── scripts/ ├── validate-bash.sh ├── validate-write.sh └── check-tests.sh ``` ### After Migration ``` my-plugin/ ├── .claude-plugin/plugin.json ├── hooks/hooks.json # Now uses prompt hooks └── scripts/ # Archive or delete └── archive/ ├── validate-bash.sh ├── validate-write.sh └── check-tests.sh ``` ### Updated hooks.json ```json { "PreToolUse": [ { "matcher": "Bash", "hooks": [ { "type": "prompt", "prompt": "Validate bash command safety: destructive ops, privilege escalation, network access" } ] }, { "matcher": "Write|Edit", "hooks": [ { "type": "prompt", "prompt": "Validate file write safety: system paths, credentials, path traversal, content secrets" } ] } ], "Stop": [ { "matcher": "*", "hooks": [ { "type": "prompt", "prompt": "Verify tests were run if code was modified" } ] } ] } ``` **Result:** Simpler, more maintainable, more powerful. 
## Common Migration Patterns ### Pattern: String Contains → Natural Language **Before:** ```bash if [[ "$command" == *"sudo"* ]]; then echo "Privilege escalation" >&2 exit 2 fi ``` **After:** ``` "Check for privilege escalation (sudo, su, etc)" ``` ### Pattern: Regex → Intent **Before:** ```bash if [[ "$file" =~ \.(env|secret|key|token)$ ]]; then echo "Credential file" >&2 exit 2 fi ``` **After:** ``` "Verify not writing to credential files (.env, secrets, keys, tokens)" ``` ### Pattern: Multiple Conditions → Criteria List **Before:** ```bash if [ condition1 ] || [ condition2 ] || [ condition3 ]; then echo "Invalid" >&2 exit 2 fi ``` **After:** ``` "Check: 1) condition1 2) condition2 3) condition3. Deny if any fail." ``` ## Conclusion Migrating to prompt-based hooks makes plugins more maintainable, flexible, and powerful. Reserve command hooks for deterministic checks and external tool integration. ================================================ FILE: plugins/plugin-dev/skills/hook-development/references/patterns.md ================================================ # Common Hook Patterns This reference provides common, proven patterns for implementing Claude Code hooks. Use these patterns as starting points for typical hook use cases. ## Pattern 1: Security Validation Block dangerous file writes using prompt-based hooks: ```json { "PreToolUse": [ { "matcher": "Write|Edit", "hooks": [ { "type": "prompt", "prompt": "File path: $TOOL_INPUT.file_path. Verify: 1) Not in /etc or system directories 2) Not .env or credentials 3) Path doesn't contain '..' traversal. Return 'approve' or 'deny'." } ] } ] } ``` **Use for:** Preventing writes to sensitive files or system directories. ## Pattern 2: Test Enforcement Ensure tests run before stopping: ```json { "Stop": [ { "matcher": "*", "hooks": [ { "type": "prompt", "prompt": "Review transcript. If code was modified (Write/Edit tools used), verify tests were executed. 
If no tests were run, block with reason 'Tests must be run after code changes'." } ] } ] } ``` **Use for:** Enforcing quality standards and preventing incomplete work. ## Pattern 3: Context Loading Load project-specific context at session start: ```json { "SessionStart": [ { "matcher": "*", "hooks": [ { "type": "command", "command": "bash ${CLAUDE_PLUGIN_ROOT}/scripts/load-context.sh" } ] } ] } ``` **Example script (load-context.sh):** ```bash #!/bin/bash cd "$CLAUDE_PROJECT_DIR" || exit 1 # Detect project type if [ -f "package.json" ]; then echo "📦 Node.js project detected" echo "export PROJECT_TYPE=nodejs" >> "$CLAUDE_ENV_FILE" elif [ -f "Cargo.toml" ]; then echo "🦀 Rust project detected" echo "export PROJECT_TYPE=rust" >> "$CLAUDE_ENV_FILE" fi ``` **Use for:** Automatically detecting and configuring project-specific settings. ## Pattern 4: Notification Logging Log all notifications for audit or analysis: ```json { "Notification": [ { "matcher": "*", "hooks": [ { "type": "command", "command": "bash ${CLAUDE_PLUGIN_ROOT}/scripts/log-notification.sh" } ] } ] } ``` **Use for:** Tracking user notifications or integration with external logging systems. ## Pattern 5: MCP Tool Monitoring Monitor and validate MCP tool usage: ```json { "PreToolUse": [ { "matcher": "mcp__.*__delete.*", "hooks": [ { "type": "prompt", "prompt": "Deletion operation detected. Verify: Is this deletion intentional? Can it be undone? Are there backups? Return 'approve' only if safe." } ] } ] } ``` **Use for:** Protecting against destructive MCP operations. ## Pattern 6: Build Verification Ensure project builds after code changes: ```json { "Stop": [ { "matcher": "*", "hooks": [ { "type": "prompt", "prompt": "Check if code was modified. If Write/Edit tools were used, verify the project was built (npm run build, cargo build, etc). If not built, block and request build." } ] } ] } ``` **Use for:** Catching build errors before committing or stopping work. 
## Pattern 7: Permission Confirmation Ask user before dangerous operations: ```json { "PreToolUse": [ { "matcher": "Bash", "hooks": [ { "type": "prompt", "prompt": "Command: $TOOL_INPUT.command. If command contains 'rm', 'delete', 'drop', or other destructive operations, return 'ask' to confirm with user. Otherwise 'approve'." } ] } ] } ``` **Use for:** User confirmation on potentially destructive commands. ## Pattern 8: Code Quality Checks Run linters or formatters on file edits: ```json { "PostToolUse": [ { "matcher": "Write|Edit", "hooks": [ { "type": "command", "command": "bash ${CLAUDE_PLUGIN_ROOT}/scripts/check-quality.sh" } ] } ] } ``` **Example script (check-quality.sh):** ```bash #!/bin/bash input=$(cat) file_path=$(echo "$input" | jq -r '.tool_input.file_path') # Run linter if applicable if [[ "$file_path" == *.js ]] || [[ "$file_path" == *.ts ]]; then npx eslint "$file_path" 2>&1 || true fi ``` **Use for:** Automatic code quality enforcement. ## Pattern Combinations Combine multiple patterns for comprehensive protection: ```json { "PreToolUse": [ { "matcher": "Write|Edit", "hooks": [ { "type": "prompt", "prompt": "Validate file write safety" } ] }, { "matcher": "Bash", "hooks": [ { "type": "prompt", "prompt": "Validate bash command safety" } ] } ], "Stop": [ { "matcher": "*", "hooks": [ { "type": "prompt", "prompt": "Verify tests run and build succeeded" } ] } ], "SessionStart": [ { "matcher": "*", "hooks": [ { "type": "command", "command": "bash ${CLAUDE_PLUGIN_ROOT}/scripts/load-context.sh" } ] } ] } ``` This provides multi-layered protection and automation. ## Pattern 9: Temporarily Active Hooks Create hooks that only run when explicitly enabled via flag files: ```bash #!/bin/bash # Hook only active when flag file exists FLAG_FILE="$CLAUDE_PROJECT_DIR/.enable-security-scan" if [ ! 
-f "$FLAG_FILE" ]; then # Quick exit when disabled exit 0 fi # Flag present, run validation input=$(cat) file_path=$(echo "$input" | jq -r '.tool_input.file_path') # Run security scan security-scanner "$file_path" ``` **Activation:** ```bash # Enable the hook touch .enable-security-scan # Disable the hook rm .enable-security-scan ``` **Use for:** - Temporary debugging hooks - Feature flags for development - Project-specific validation that's opt-in - Performance-intensive checks only when needed **Note:** Must restart Claude Code after creating/removing flag files for hooks to recognize changes. ## Pattern 10: Configuration-Driven Hooks Use JSON configuration to control hook behavior: ```bash #!/bin/bash CONFIG_FILE="$CLAUDE_PROJECT_DIR/.claude/my-plugin.local.json" # Read configuration if [ -f "$CONFIG_FILE" ]; then strict_mode=$(jq -r '.strictMode // false' "$CONFIG_FILE") max_file_size=$(jq -r '.maxFileSize // 1000000' "$CONFIG_FILE") else # Defaults strict_mode=false max_file_size=1000000 fi # Skip if not in strict mode if [ "$strict_mode" != "true" ]; then exit 0 fi # Apply configured limits input=$(cat) file_size=$(echo "$input" | jq -r '.tool_input.content | length') if [ "$file_size" -gt "$max_file_size" ]; then echo '{"decision": "deny", "reason": "File exceeds configured size limit"}' >&2 exit 2 fi ``` **Configuration file (.claude/my-plugin.local.json):** ```json { "strictMode": true, "maxFileSize": 500000, "allowedPaths": ["/tmp", "/home/user/projects"] } ``` **Use for:** - User-configurable hook behavior - Per-project settings - Team-specific rules - Dynamic validation criteria ================================================ FILE: plugins/plugin-dev/skills/hook-development/scripts/README.md ================================================ # Hook Development Utility Scripts These scripts help validate, test, and lint hook implementations before deployment. 
## validate-hook-schema.sh Validates `hooks.json` configuration files for correct structure and common issues. **Usage:** ```bash ./validate-hook-schema.sh path/to/hooks.json ``` **Checks:** - Valid JSON syntax - Required fields present - Valid hook event names - Proper hook types (command/prompt) - Timeout values in valid ranges - Hardcoded path detection - Prompt hook event compatibility **Example:** ```bash cd my-plugin ./validate-hook-schema.sh hooks/hooks.json ``` ## test-hook.sh Tests individual hook scripts with sample input before deploying to Claude Code. **Usage:** ```bash ./test-hook.sh [options] ``` **Options:** - `-v, --verbose` - Show detailed execution information - `-t, --timeout N` - Set timeout in seconds (default: 60) - `--create-sample ` - Generate sample test input **Example:** ```bash # Create sample test input ./test-hook.sh --create-sample PreToolUse > test-input.json # Test a hook script ./test-hook.sh my-hook.sh test-input.json # Test with verbose output and custom timeout ./test-hook.sh -v -t 30 my-hook.sh test-input.json ``` **Features:** - Sets up proper environment variables (CLAUDE_PROJECT_DIR, CLAUDE_PLUGIN_ROOT) - Measures execution time - Validates output JSON - Shows exit codes and their meanings - Captures environment file output ## hook-linter.sh Checks hook scripts for common issues and best practices violations. **Usage:** ```bash ./hook-linter.sh [hook-script2.sh ...] ``` **Checks:** - Shebang presence - `set -euo pipefail` usage - Stdin input reading - Proper error handling - Variable quoting (injection prevention) - Exit code usage - Hardcoded paths - Long-running code detection - Error output to stderr - Input validation **Example:** ```bash # Lint single script ./hook-linter.sh ../examples/validate-write.sh # Lint multiple scripts ./hook-linter.sh ../examples/*.sh ``` ## Typical Workflow 1. **Write your hook script** ```bash vim my-plugin/scripts/my-hook.sh ``` 2. 
**Lint the script** ```bash ./hook-linter.sh my-plugin/scripts/my-hook.sh ``` 3. **Create test input** ```bash ./test-hook.sh --create-sample PreToolUse > test-input.json # Edit test-input.json as needed ``` 4. **Test the hook** ```bash ./test-hook.sh -v my-plugin/scripts/my-hook.sh test-input.json ``` 5. **Add to hooks.json** ```bash # Edit my-plugin/hooks/hooks.json ``` 6. **Validate configuration** ```bash ./validate-hook-schema.sh my-plugin/hooks/hooks.json ``` 7. **Test in Claude Code** ```bash claude --debug ``` ## Tips - Always test hooks before deploying to avoid breaking user workflows - Use verbose mode (`-v`) to debug hook behavior - Check the linter output for security and best practice issues - Validate hooks.json after any changes - Create different test inputs for various scenarios (safe operations, dangerous operations, edge cases) ## Common Issues ### Hook doesn't execute Check: - Script has shebang (`#!/bin/bash`) - Script is executable (`chmod +x`) - Path in hooks.json is correct (use `${CLAUDE_PLUGIN_ROOT}`) ### Hook times out - Reduce timeout in hooks.json - Optimize hook script performance - Remove long-running operations ### Hook fails silently - Check exit codes (should be 0 or 2) - Ensure errors go to stderr (`>&2`) - Validate JSON output structure ### Injection vulnerabilities - Always quote variables: `"$variable"` - Use `set -euo pipefail` - Validate all input fields - Run the linter to catch issues ================================================ FILE: plugins/plugin-dev/skills/hook-development/scripts/hook-linter.sh ================================================ #!/bin/bash # Hook Linter # Checks hook scripts for common issues and best practices set -euo pipefail # Usage if [ $# -eq 0 ]; then echo "Usage: $0 [hook-script2.sh ...]" echo "" echo "Checks hook scripts for:" echo " - Shebang presence" echo " - set -euo pipefail usage" echo " - Input reading from stdin" echo " - Proper error handling" echo " - Variable quoting" echo " - 
Exit code usage"
    echo " - Hardcoded paths"
    echo " - Timeout considerations"
    exit 1
fi

# Lint a single hook script. Prints findings to stdout; returns 0 when there
# are no errors (warnings are allowed) and 1 when the file is missing or has
# errors. All checks are grep-based heuristics and may false-positive.
check_script() {
    local script="$1"
    local warnings=0
    local errors=0

    echo "🔍 Linting: $script"
    echo ""

    if [ ! -f "$script" ]; then
        echo "❌ Error: File not found"
        return 1
    fi

    # Check 1: Executable
    if [ ! -x "$script" ]; then
        echo "⚠️ Not executable (chmod +x $script)"
        # Plain assignment instead of ((warnings++)): the post-increment of a
        # zero value returns exit status 1, which aborts under `set -e`.
        warnings=$((warnings + 1))
    fi

    # Check 2: Shebang
    first_line=$(head -1 "$script")
    if [[ ! "$first_line" =~ ^#!/ ]]; then
        echo "❌ Missing shebang (#!/bin/bash)"
        errors=$((errors + 1))
    fi

    # Check 3: set -euo pipefail
    if ! grep -q "set -euo pipefail" "$script"; then
        echo "⚠️ Missing 'set -euo pipefail' (recommended for safety)"
        warnings=$((warnings + 1))
    fi

    # Check 4: Reads from stdin
    if ! grep -q "cat\|read" "$script"; then
        echo "⚠️ Doesn't appear to read input from stdin"
        warnings=$((warnings + 1))
    fi

    # Check 5: Uses jq for JSON parsing
    if grep -q "tool_input\|tool_name" "$script" && ! grep -q "jq" "$script"; then
        echo "⚠️ Parses hook input but doesn't use jq"
        warnings=$((warnings + 1))
    fi

    # Check 6: Unquoted variables (heuristic — may flag quoted usages too)
    if grep -E '\$[A-Za-z_][A-Za-z0-9_]*[^"]' "$script" | grep -v '#' | grep -q .; then
        echo "⚠️ Potentially unquoted variables detected (injection risk)"
        echo " Always use double quotes: \"\$variable\" not \$variable"
        warnings=$((warnings + 1))
    fi

    # Check 7: Hardcoded paths (ignores comment-only lines via ^[^#]*)
    if grep -E '^[^#]*/home/|^[^#]*/usr/|^[^#]*/opt/' "$script" | grep -q .; then
        echo "⚠️ Hardcoded absolute paths detected"
        echo " Use \$CLAUDE_PROJECT_DIR or \$CLAUDE_PLUGIN_ROOT"
        warnings=$((warnings + 1))
    fi

    # Check 8: Uses CLAUDE_PLUGIN_ROOT (informational only, not a warning)
    if ! grep -q "CLAUDE_PLUGIN_ROOT\|CLAUDE_PROJECT_DIR" "$script"; then
        echo "💡 Tip: Use \$CLAUDE_PLUGIN_ROOT for plugin-relative paths"
    fi

    # Check 9: Exit codes
    if ! grep -q "exit 0\|exit 2" "$script"; then
        echo "⚠️ No explicit exit codes (should exit 0 or 2)"
        warnings=$((warnings + 1))
    fi

    # Check 10: JSON output for decision hooks (informational only)
    if grep -q "PreToolUse\|Stop" "$script"; then
        if ! grep -q "permissionDecision\|decision" "$script"; then
            echo "💡 Tip: PreToolUse/Stop hooks should output decision JSON"
        fi
    fi

    # Check 11: Long-running commands
    if grep -E 'sleep [0-9]{3,}|while true' "$script" | grep -v '#' | grep -q .; then
        echo "⚠️ Potentially long-running code detected"
        echo " Hooks should complete quickly (< 60s)"
        warnings=$((warnings + 1))
    fi

    # Check 12: Error messages to stderr
    if grep -q 'echo.*".*error\|Error\|denied\|Denied' "$script"; then
        if ! grep -q '>&2' "$script"; then
            echo "⚠️ Error messages should be written to stderr (>&2)"
            warnings=$((warnings + 1))
        fi
    fi

    # Check 13: Input validation (informational only)
    if ! grep -q "if.*empty\|if.*null\|if.*-z" "$script"; then
        echo "💡 Tip: Consider validating input fields aren't empty"
    fi

    echo ""
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    if [ $errors -eq 0 ] && [ $warnings -eq 0 ]; then
        echo "✅ No issues found"
        return 0
    elif [ $errors -eq 0 ]; then
        echo "⚠️ Found $warnings warning(s)"
        return 0
    else
        echo "❌ Found $errors error(s) and $warnings warning(s)"
        return 1
    fi
}

echo "🔎 Hook Script Linter"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""

total_errors=0
for script in "$@"; do if !
check_script "$script"; then
        # Plain assignment instead of ((total_errors++)): under `set -e` the
        # post-increment of a zero value returns status 1 and would abort the
        # whole run on the very first failing script.
        total_errors=$((total_errors + 1))
    fi
    echo ""
done

if [ $total_errors -eq 0 ]; then
    echo "✅ All scripts passed linting"
    exit 0
else
    echo "❌ $total_errors script(s) had errors"
    exit 1
fi



================================================
FILE: plugins/plugin-dev/skills/hook-development/scripts/test-hook.sh
================================================
#!/bin/bash
# Hook Testing Helper
# Tests a hook with sample input and shows output

set -euo pipefail

# Usage
show_usage() {
    echo "Usage: $0 [options] <hook-script> <test-input.json>"
    echo ""
    echo "Options:"
    echo " -h, --help Show this help message"
    echo " -v, --verbose Show detailed execution information"
    echo " -t, --timeout N Set timeout in seconds (default: 60)"
    echo ""
    echo "Examples:"
    echo " $0 validate-bash.sh test-input.json"
    echo " $0 -v -t 30 validate-write.sh write-input.json"
    echo ""
    echo "Creates sample test input with:"
    echo " $0 --create-sample <event-type>"
    exit 0
}

# Emit a sample hook-input JSON payload for the given event type to stdout.
# Exits 1 on an unrecognized event type.
create_sample() {
    event_type="$1"
    case "$event_type" in
        PreToolUse)
            cat <<'EOF'
{
  "session_id": "test-session",
  "transcript_path": "/tmp/transcript.txt",
  "cwd": "/tmp/test-project",
  "permission_mode": "ask",
  "hook_event_name": "PreToolUse",
  "tool_name": "Write",
  "tool_input": {
    "file_path": "/tmp/test.txt",
    "content": "Test content"
  }
}
EOF
            ;;
        PostToolUse)
            cat <<'EOF'
{
  "session_id": "test-session",
  "transcript_path": "/tmp/transcript.txt",
  "cwd": "/tmp/test-project",
  "permission_mode": "ask",
  "hook_event_name": "PostToolUse",
  "tool_name": "Bash",
  "tool_result": "Command executed successfully"
}
EOF
            ;;
        Stop|SubagentStop)
            cat <<'EOF'
{
  "session_id": "test-session",
  "transcript_path": "/tmp/transcript.txt",
  "cwd": "/tmp/test-project",
  "permission_mode": "ask",
  "hook_event_name": "Stop",
  "reason": "Task appears complete"
}
EOF
            ;;
        UserPromptSubmit)
            cat <<'EOF'
{
  "session_id": "test-session",
  "transcript_path": "/tmp/transcript.txt",
  "cwd": "/tmp/test-project",
  "permission_mode": "ask",
  "hook_event_name": "UserPromptSubmit",
  "user_prompt": "Test user
prompt"
}
EOF
            ;;
        SessionStart|SessionEnd)
            cat <<'EOF'
{
  "session_id": "test-session",
  "transcript_path": "/tmp/transcript.txt",
  "cwd": "/tmp/test-project",
  "permission_mode": "ask",
  "hook_event_name": "SessionStart"
}
EOF
            ;;
        *)
            echo "Unknown event type: $event_type"
            echo "Valid types: PreToolUse, PostToolUse, Stop, SubagentStop, UserPromptSubmit, SessionStart, SessionEnd"
            exit 1
            ;;
    esac
}

# Parse arguments
VERBOSE=false
TIMEOUT=60
while [ $# -gt 0 ]; do
    case "$1" in
        -h|--help)
            show_usage
            ;;
        -v|--verbose)
            VERBOSE=true
            shift
            ;;
        -t|--timeout)
            TIMEOUT="$2"
            shift 2
            ;;
        --create-sample)
            create_sample "$2"
            exit 0
            ;;
        *)
            break
            ;;
    esac
done

if [ $# -ne 2 ]; then
    echo "Error: Missing required arguments"
    echo ""
    show_usage
fi

HOOK_SCRIPT="$1"
TEST_INPUT="$2"

# Validate inputs
if [ ! -f "$HOOK_SCRIPT" ]; then
    echo "❌ Error: Hook script not found: $HOOK_SCRIPT"
    exit 1
fi

# Build the command as an array so paths containing spaces or shell
# metacharacters survive intact; the previous approach interpolated the path
# into a `bash -c "cat ... | $HOOK_SCRIPT"` string, which word-split and
# re-interpreted it (quoting/injection hazard).
HOOK_CMD=("$HOOK_SCRIPT")
if [ ! -x "$HOOK_SCRIPT" ]; then
    echo "⚠️ Warning: Hook script is not executable. Attempting to run with bash..."
    HOOK_CMD=(bash "$HOOK_SCRIPT")
fi

if [ ! -f "$TEST_INPUT" ]; then
    echo "❌ Error: Test input not found: $TEST_INPUT"
    exit 1
fi

# Validate test input JSON
if ! jq empty "$TEST_INPUT" 2>/dev/null; then
    echo "❌ Error: Test input is not valid JSON"
    exit 1
fi

echo "🧪 Testing hook: $HOOK_SCRIPT"
echo "📥 Input: $TEST_INPUT"
echo ""

if [ "$VERBOSE" = true ]; then
    echo "Input JSON:"
    jq . "$TEST_INPUT"
    echo ""
fi

# Set up environment (defaults only; respects caller-provided values)
export CLAUDE_PROJECT_DIR="${CLAUDE_PROJECT_DIR:-/tmp/test-project}"
export CLAUDE_PLUGIN_ROOT="${CLAUDE_PLUGIN_ROOT:-$(pwd)}"
export CLAUDE_ENV_FILE="${CLAUDE_ENV_FILE:-/tmp/test-env-$$}"

if [ "$VERBOSE" = true ]; then
    echo "Environment:"
    echo " CLAUDE_PROJECT_DIR=$CLAUDE_PROJECT_DIR"
    echo " CLAUDE_PLUGIN_ROOT=$CLAUDE_PLUGIN_ROOT"
    echo " CLAUDE_ENV_FILE=$CLAUDE_ENV_FILE"
    echo ""
fi

# Run the hook
echo "▶️ Running hook (timeout: ${TIMEOUT}s)..."
echo ""

start_time=$(date +%s)
set +e
# Feed the test input on stdin via redirection (no `cat` pipeline needed) and
# exec the command array directly instead of a shell-interpolated string.
output=$(timeout "$TIMEOUT" "${HOOK_CMD[@]}" < "$TEST_INPUT" 2>&1)
exit_code=$?
set -e end_time=$(date +%s) duration=$((end_time - start_time)) # Analyze results echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" echo "Results:" echo "" echo "Exit Code: $exit_code" echo "Duration: ${duration}s" echo "" case $exit_code in 0) echo "✅ Hook approved/succeeded" ;; 2) echo "🚫 Hook blocked/denied" ;; 124) echo "⏱️ Hook timed out after ${TIMEOUT}s" ;; *) echo "⚠️ Hook returned unexpected exit code: $exit_code" ;; esac echo "" echo "Output:" if [ -n "$output" ]; then echo "$output" echo "" # Try to parse as JSON if echo "$output" | jq empty 2>/dev/null; then echo "Parsed JSON output:" echo "$output" | jq . fi else echo "(no output)" fi # Check for environment file if [ -f "$CLAUDE_ENV_FILE" ]; then echo "" echo "Environment file created:" cat "$CLAUDE_ENV_FILE" rm -f "$CLAUDE_ENV_FILE" fi echo "" echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" if [ $exit_code -eq 0 ] || [ $exit_code -eq 2 ]; then echo "✅ Test completed successfully" exit 0 else echo "❌ Test failed" exit 1 fi ================================================ FILE: plugins/plugin-dev/skills/hook-development/scripts/validate-hook-schema.sh ================================================ #!/bin/bash # Hook Schema Validator # Validates hooks.json structure and checks for common issues set -euo pipefail # Usage if [ $# -eq 0 ]; then echo "Usage: $0 " echo "" echo "Validates hook configuration file for:" echo " - Valid JSON syntax" echo " - Required fields" echo " - Hook type validity" echo " - Matcher patterns" echo " - Timeout ranges" exit 1 fi HOOKS_FILE="$1" if [ ! -f "$HOOKS_FILE" ]; then echo "❌ Error: File not found: $HOOKS_FILE" exit 1 fi echo "🔍 Validating hooks configuration: $HOOKS_FILE" echo "" # Check 1: Valid JSON echo "Checking JSON syntax..." if ! jq empty "$HOOKS_FILE" 2>/dev/null; then echo "❌ Invalid JSON syntax" exit 1 fi echo "✅ Valid JSON" # Check 2: Root structure echo "" echo "Checking root structure..." 
VALID_EVENTS=("PreToolUse" "PostToolUse" "UserPromptSubmit" "Stop" "SubagentStop" "SessionStart" "SessionEnd" "PreCompact" "Notification") for event in $(jq -r 'keys[]' "$HOOKS_FILE"); do found=false for valid_event in "${VALID_EVENTS[@]}"; do if [ "$event" = "$valid_event" ]; then found=true break fi done if [ "$found" = false ]; then echo "⚠️ Unknown event type: $event" fi done echo "✅ Root structure valid" # Check 3: Validate each hook echo "" echo "Validating individual hooks..." error_count=0 warning_count=0 for event in $(jq -r 'keys[]' "$HOOKS_FILE"); do hook_count=$(jq -r ".\"$event\" | length" "$HOOKS_FILE") for ((i=0; i___` **Example:** - Plugin: `asana` - Server: `asana` - Tool: `create_task` - **Full name:** `mcp__plugin_asana_asana__asana_create_task` ### Using MCP Tools in Commands Pre-allow specific MCP tools in command frontmatter: ```markdown --- allowed-tools: [ "mcp__plugin_asana_asana__asana_create_task", "mcp__plugin_asana_asana__asana_search_tasks" ] --- ``` **Wildcard (use sparingly):** ```markdown --- allowed-tools: ["mcp__plugin_asana_asana__*"] --- ``` **Best practice:** Pre-allow specific tools, not wildcards, for security. ## Lifecycle Management **Automatic startup:** - MCP servers start when plugin enables - Connection established before first tool use - Restart required for configuration changes **Lifecycle:** 1. Plugin loads 2. MCP configuration parsed 3. Server process started (stdio) or connection established (SSE/HTTP/WS) 4. Tools discovered and registered 5. Tools available as `mcp__plugin_...__...` **Viewing servers:** Use `/mcp` command to see all servers including plugin-provided ones. ## Authentication Patterns ### OAuth (SSE/HTTP) OAuth handled automatically by Claude Code: ```json { "type": "sse", "url": "https://mcp.example.com/sse" } ``` User authenticates in browser on first use. No additional configuration needed. 
### Token-Based (Headers) Static or environment variable tokens: ```json { "type": "http", "url": "https://api.example.com", "headers": { "Authorization": "Bearer ${API_TOKEN}" } } ``` Document required environment variables in README. ### Environment Variables (stdio) Pass configuration to MCP server: ```json { "command": "python", "args": ["-m", "my_mcp_server"], "env": { "DATABASE_URL": "${DB_URL}", "API_KEY": "${API_KEY}", "LOG_LEVEL": "info" } } ``` ## Integration Patterns ### Pattern 1: Simple Tool Wrapper Commands use MCP tools with user interaction: ```markdown # Command: create-item.md --- allowed-tools: ["mcp__plugin_name_server__create_item"] --- Steps: 1. Gather item details from user 2. Use mcp__plugin_name_server__create_item 3. Confirm creation ``` **Use for:** Adding validation or preprocessing before MCP calls. ### Pattern 2: Autonomous Agent Agents use MCP tools autonomously: ```markdown # Agent: data-analyzer.md Analysis Process: 1. Query data via mcp__plugin_db_server__query 2. Process and analyze results 3. Generate insights report ``` **Use for:** Multi-step MCP workflows without user interaction. ### Pattern 3: Multi-Server Plugin Integrate multiple MCP servers: ```json { "github": { "type": "sse", "url": "https://mcp.github.com/sse" }, "jira": { "type": "sse", "url": "https://mcp.jira.com/sse" } } ``` **Use for:** Workflows spanning multiple services. 
## Security Best Practices ### Use HTTPS/WSS Always use secure connections: ```json ✅ "url": "https://mcp.example.com/sse" ❌ "url": "http://mcp.example.com/sse" ``` ### Token Management **DO:** - ✅ Use environment variables for tokens - ✅ Document required env vars in README - ✅ Let OAuth flow handle authentication **DON'T:** - ❌ Hardcode tokens in configuration - ❌ Commit tokens to git - ❌ Share tokens in documentation ### Permission Scoping Pre-allow only necessary MCP tools: ```markdown ✅ allowed-tools: [ "mcp__plugin_api_server__read_data", "mcp__plugin_api_server__create_item" ] ❌ allowed-tools: ["mcp__plugin_api_server__*"] ``` ## Error Handling ### Connection Failures Handle MCP server unavailability: - Provide fallback behavior in commands - Inform user of connection issues - Check server URL and configuration ### Tool Call Errors Handle failed MCP operations: - Validate inputs before calling MCP tools - Provide clear error messages - Check rate limiting and quotas ### Configuration Errors Validate MCP configuration: - Test server connectivity during development - Validate JSON syntax - Check required environment variables ## Performance Considerations ### Lazy Loading MCP servers connect on-demand: - Not all servers connect at startup - First tool use triggers connection - Connection pooling managed automatically ### Batching Batch similar requests when possible: ``` # Good: Single query with filters tasks = search_tasks(project="X", assignee="me", limit=50) # Avoid: Many individual queries for id in task_ids: task = get_task(id) ``` ## Testing MCP Integration ### Local Testing 1. Configure MCP server in `.mcp.json` 2. Install plugin locally (`.claude-plugin/`) 3. Run `/mcp` to verify server appears 4. Test tool calls in commands 5. 
Check `claude --debug` logs for connection issues ### Validation Checklist - [ ] MCP configuration is valid JSON - [ ] Server URL is correct and accessible - [ ] Required environment variables documented - [ ] Tools appear in `/mcp` output - [ ] Authentication works (OAuth or tokens) - [ ] Tool calls succeed from commands - [ ] Error cases handled gracefully ## Debugging ### Enable Debug Logging ```bash claude --debug ``` Look for: - MCP server connection attempts - Tool discovery logs - Authentication flows - Tool call errors ### Common Issues **Server not connecting:** - Check URL is correct - Verify server is running (stdio) - Check network connectivity - Review authentication configuration **Tools not available:** - Verify server connected successfully - Check tool names match exactly - Run `/mcp` to see available tools - Restart Claude Code after config changes **Authentication failing:** - Clear cached auth tokens - Re-authenticate - Check token scopes and permissions - Verify environment variables set ## Quick Reference ### MCP Server Types | Type | Transport | Best For | Auth | |------|-----------|----------|------| | stdio | Process | Local tools, custom servers | Env vars | | SSE | HTTP | Hosted services, cloud APIs | OAuth | | HTTP | REST | API backends, token auth | Tokens | | ws | WebSocket | Real-time, streaming | Tokens | ### Configuration Checklist - [ ] Server type specified (stdio/SSE/HTTP/ws) - [ ] Type-specific fields complete (command or url) - [ ] Authentication configured - [ ] Environment variables documented - [ ] HTTPS/WSS used (not HTTP/WS) - [ ] ${CLAUDE_PLUGIN_ROOT} used for paths ### Best Practices **DO:** - ✅ Use ${CLAUDE_PLUGIN_ROOT} for portable paths - ✅ Document required environment variables - ✅ Use secure connections (HTTPS/WSS) - ✅ Pre-allow specific MCP tools in commands - ✅ Test MCP integration before publishing - ✅ Handle connection and tool errors gracefully **DON'T:** - ❌ Hardcode absolute paths - ❌ Commit credentials to 
git - ❌ Use HTTP instead of HTTPS - ❌ Pre-allow all tools with wildcards - ❌ Skip error handling - ❌ Forget to document setup ## Additional Resources ### Reference Files For detailed information, consult: - **`references/server-types.md`** - Deep dive on each server type - **`references/authentication.md`** - Authentication patterns and OAuth - **`references/tool-usage.md`** - Using MCP tools in commands and agents ### Example Configurations Working examples in `examples/`: - **`stdio-server.json`** - Local stdio MCP server - **`sse-server.json`** - Hosted SSE server with OAuth - **`http-server.json`** - REST API with token auth ### External Resources - **Official MCP Docs**: https://modelcontextprotocol.io/ - **Claude Code MCP Docs**: https://docs.claude.com/en/docs/claude-code/mcp - **MCP SDK**: @modelcontextprotocol/sdk - **Testing**: Use `claude --debug` and `/mcp` command ## Implementation Workflow To add MCP integration to a plugin: 1. Choose MCP server type (stdio, SSE, HTTP, ws) 2. Create `.mcp.json` at plugin root with configuration 3. Use ${CLAUDE_PLUGIN_ROOT} for all file references 4. Document required environment variables in README 5. Test locally with `/mcp` command 6. Pre-allow MCP tools in relevant commands 7. Handle authentication (OAuth or tokens) 8. Test error cases (connection failures, auth errors) 9. Document MCP integration in plugin README Focus on stdio for custom/local servers, SSE for hosted services with OAuth. 
================================================ FILE: plugins/plugin-dev/skills/mcp-integration/examples/http-server.json ================================================ { "_comment": "Example HTTP MCP server configuration for REST APIs", "rest-api": { "type": "http", "url": "https://api.example.com/mcp", "headers": { "Authorization": "Bearer ${API_TOKEN}", "Content-Type": "application/json", "X-API-Version": "2024-01-01" } }, "internal-service": { "type": "http", "url": "https://api.example.com/mcp", "headers": { "Authorization": "Bearer ${API_TOKEN}", "X-Service-Name": "claude-plugin" } } } ================================================ FILE: plugins/plugin-dev/skills/mcp-integration/examples/sse-server.json ================================================ { "_comment": "Example SSE MCP server configuration for hosted cloud services", "asana": { "type": "sse", "url": "https://mcp.asana.com/sse" }, "github": { "type": "sse", "url": "https://mcp.github.com/sse" }, "custom-service": { "type": "sse", "url": "https://mcp.example.com/sse", "headers": { "X-API-Version": "v1", "X-Client-ID": "${CLIENT_ID}" } } } ================================================ FILE: plugins/plugin-dev/skills/mcp-integration/examples/stdio-server.json ================================================ { "_comment": "Example stdio MCP server configuration for local file system access", "filesystem": { "command": "npx", "args": ["-y", "@modelcontextprotocol/server-filesystem", "${CLAUDE_PROJECT_DIR}"], "env": { "LOG_LEVEL": "info" } }, "database": { "command": "${CLAUDE_PLUGIN_ROOT}/servers/db-server.js", "args": ["--config", "${CLAUDE_PLUGIN_ROOT}/config/db.json"], "env": { "DATABASE_URL": "${DATABASE_URL}", "DB_POOL_SIZE": "10" } }, "custom-tools": { "command": "python", "args": ["-m", "my_mcp_server", "--port", "8080"], "env": { "API_KEY": "${CUSTOM_API_KEY}", "DEBUG": "false" } } } ================================================ FILE: 
plugins/plugin-dev/skills/mcp-integration/references/authentication.md ================================================ # MCP Authentication Patterns Complete guide to authentication methods for MCP servers in Claude Code plugins. ## Overview MCP servers support multiple authentication methods depending on the server type and service requirements. Choose the method that best matches your use case and security requirements. ## OAuth (Automatic) ### How It Works Claude Code automatically handles the complete OAuth 2.0 flow for SSE and HTTP servers: 1. User attempts to use MCP tool 2. Claude Code detects authentication needed 3. Opens browser for OAuth consent 4. User authorizes in browser 5. Tokens stored securely by Claude Code 6. Automatic token refresh ### Configuration ```json { "service": { "type": "sse", "url": "https://mcp.example.com/sse" } } ``` No additional auth configuration needed! Claude Code handles everything. ### Supported Services **Known OAuth-enabled MCP servers:** - Asana: `https://mcp.asana.com/sse` - GitHub (when available) - Google services (when available) - Custom OAuth servers ### OAuth Scopes OAuth scopes are determined by the MCP server. Users see required scopes during the consent flow. 
**Document required scopes in your README:** ```markdown ## Authentication This plugin requires the following Asana permissions: - Read tasks and projects - Create and update tasks - Access workspace data ``` ### Token Storage Tokens are stored securely by Claude Code: - Not accessible to plugins - Encrypted at rest - Automatic refresh - Cleared on sign-out ### Troubleshooting OAuth **Authentication loop:** - Clear cached tokens (sign out and sign in) - Check OAuth redirect URLs - Verify server OAuth configuration **Scope issues:** - User may need to re-authorize for new scopes - Check server documentation for required scopes **Token expiration:** - Claude Code auto-refreshes - If refresh fails, prompts re-authentication ## Token-Based Authentication ### Bearer Tokens Most common for HTTP and WebSocket servers. **Configuration:** ```json { "api": { "type": "http", "url": "https://api.example.com/mcp", "headers": { "Authorization": "Bearer ${API_TOKEN}" } } } ``` **Environment variable:** ```bash export API_TOKEN="your-secret-token-here" ``` ### API Keys Alternative to Bearer tokens, often in custom headers. **Configuration:** ```json { "api": { "type": "http", "url": "https://api.example.com/mcp", "headers": { "X-API-Key": "${API_KEY}", "X-API-Secret": "${API_SECRET}" } } } ``` ### Custom Headers Services may use custom authentication headers. **Configuration:** ```json { "service": { "type": "sse", "url": "https://mcp.example.com/sse", "headers": { "X-Auth-Token": "${AUTH_TOKEN}", "X-User-ID": "${USER_ID}", "X-Tenant-ID": "${TENANT_ID}" } } } ``` ### Documenting Token Requirements Always document in your README: ```markdown ## Setup ### Required Environment Variables Set these environment variables before using the plugin: \`\`\`bash export API_TOKEN="your-token-here" export API_SECRET="your-secret-here" \`\`\` ### Obtaining Tokens 1. Visit https://api.example.com/tokens 2. Create a new API token 3. Copy the token and secret 4. 
Set environment variables as shown above ### Token Permissions The API token needs the following permissions: - Read access to resources - Write access for creating items - Delete access (optional, for cleanup operations) \`\`\` ``` ## Environment Variable Authentication (stdio) ### Passing Credentials to Server For stdio servers, pass credentials via environment variables: ```json { "database": { "command": "python", "args": ["-m", "mcp_server_db"], "env": { "DATABASE_URL": "${DATABASE_URL}", "DB_USER": "${DB_USER}", "DB_PASSWORD": "${DB_PASSWORD}" } } } ``` ### User Environment Variables ```bash # User sets these in their shell export DATABASE_URL="postgresql://localhost/mydb" export DB_USER="myuser" export DB_PASSWORD="mypassword" ``` ### Documentation Template ```markdown ## Database Configuration Set these environment variables: \`\`\`bash export DATABASE_URL="postgresql://host:port/database" export DB_USER="username" export DB_PASSWORD="password" \`\`\` Or create a `.env` file (add to `.gitignore`): \`\`\` DATABASE_URL=postgresql://localhost:5432/mydb DB_USER=myuser DB_PASSWORD=mypassword \`\`\` Load with: \`source .env\` or \`export $(cat .env | xargs)\` \`\`\` ``` ## Dynamic Headers ### Headers Helper Script For tokens that change or expire, use a helper script: ```json { "api": { "type": "sse", "url": "https://api.example.com", "headersHelper": "${CLAUDE_PLUGIN_ROOT}/scripts/get-headers.sh" } } ``` **Script (get-headers.sh):** ```bash #!/bin/bash # Generate dynamic authentication headers # Fetch fresh token TOKEN=$(get-fresh-token-from-somewhere) # Output JSON headers cat <___`. Use these tools in commands and agents just like built-in Claude Code tools. 
## Tool Naming Convention ### Format ``` mcp__plugin____ ``` ### Examples **Asana plugin with asana server:** - `mcp__plugin_asana_asana__asana_create_task` - `mcp__plugin_asana_asana__asana_search_tasks` - `mcp__plugin_asana_asana__asana_get_project` **Custom plugin with database server:** - `mcp__plugin_myplug_database__query` - `mcp__plugin_myplug_database__execute` - `mcp__plugin_myplug_database__list_tables` ### Discovering Tool Names **Use `/mcp` command:** ```bash /mcp ``` This shows: - All available MCP servers - Tools provided by each server - Tool schemas and descriptions - Full tool names for use in configuration ## Using Tools in Commands ### Pre-Allowing Tools Specify MCP tools in command frontmatter: ```markdown --- description: Create a new Asana task allowed-tools: [ "mcp__plugin_asana_asana__asana_create_task" ] --- # Create Task Command To create a task: 1. Gather task details from user 2. Use mcp__plugin_asana_asana__asana_create_task with the details 3. Confirm creation to user ``` ### Multiple Tools ```markdown --- allowed-tools: [ "mcp__plugin_asana_asana__asana_create_task", "mcp__plugin_asana_asana__asana_search_tasks", "mcp__plugin_asana_asana__asana_get_project" ] --- ``` ### Wildcard (Use Sparingly) ```markdown --- allowed-tools: ["mcp__plugin_asana_asana__*"] --- ``` **Caution:** Only use wildcards if the command truly needs access to all tools from a server. ### Tool Usage in Command Instructions **Example command:** ```markdown --- description: Search and create Asana tasks allowed-tools: [ "mcp__plugin_asana_asana__asana_search_tasks", "mcp__plugin_asana_asana__asana_create_task" ] --- # Asana Task Management ## Searching Tasks To search for tasks: 1. Use mcp__plugin_asana_asana__asana_search_tasks 2. Provide search filters (assignee, project, etc.) 3. Display results to user ## Creating Tasks To create a task: 1. Gather task details: - Title (required) - Description - Project - Assignee - Due date 2. 
Use mcp__plugin_asana_asana__asana_create_task 3. Show confirmation with task link ``` ## Using Tools in Agents ### Agent Configuration Agents can use MCP tools autonomously without pre-allowing them: ```markdown --- name: asana-status-updater description: This agent should be used when the user asks to "update Asana status", "generate project report", or "sync Asana tasks" model: inherit color: blue --- ## Role Autonomous agent for generating Asana project status reports. ## Process 1. **Query tasks**: Use mcp__plugin_asana_asana__asana_search_tasks to get all tasks 2. **Analyze progress**: Calculate completion rates and identify blockers 3. **Generate report**: Create formatted status update 4. **Update Asana**: Use mcp__plugin_asana_asana__asana_create_comment to post report ## Available Tools The agent has access to all Asana MCP tools without pre-approval. ``` ### Agent Tool Access Agents have broader tool access than commands: - Can use any tool Claude determines is necessary - Don't need pre-allowed lists - Should document which tools they typically use ## Tool Call Patterns ### Pattern 1: Simple Tool Call Single tool call with validation: ```markdown Steps: 1. Validate user provided required fields 2. Call mcp__plugin_api_server__create_item with validated data 3. Check for errors 4. Display confirmation ``` ### Pattern 2: Sequential Tools Chain multiple tool calls: ```markdown Steps: 1. Search for existing items: mcp__plugin_api_server__search 2. If not found, create new: mcp__plugin_api_server__create 3. Add metadata: mcp__plugin_api_server__update_metadata 4. Return final item ID ``` ### Pattern 3: Batch Operations Multiple calls with same tool: ```markdown Steps: 1. Get list of items to process 2. For each item: - Call mcp__plugin_api_server__update_item - Track success/failure 3. Report results summary ``` ### Pattern 4: Error Handling Graceful error handling: ```markdown Steps: 1. Try to call mcp__plugin_api_server__get_data 2. 
If error (rate limit, network, etc.): - Wait and retry (max 3 attempts) - If still failing, inform user - Suggest checking configuration 3. On success, process data ``` ## Tool Parameters ### Understanding Tool Schemas Each MCP tool has a schema defining its parameters. View with `/mcp`. **Example schema:** ```json { "name": "asana_create_task", "description": "Create a new Asana task", "inputSchema": { "type": "object", "properties": { "name": { "type": "string", "description": "Task title" }, "notes": { "type": "string", "description": "Task description" }, "workspace": { "type": "string", "description": "Workspace GID" } }, "required": ["name", "workspace"] } } ``` ### Calling Tools with Parameters Claude automatically structures tool calls based on schema: ```typescript // Claude generates this internally { toolName: "mcp__plugin_asana_asana__asana_create_task", input: { name: "Review PR #123", notes: "Code review for new feature", workspace: "12345", assignee: "67890", due_on: "2025-01-15" } } ``` ### Parameter Validation **In commands, validate before calling:** ```markdown Steps: 1. Check required parameters: - Title is not empty - Workspace ID is provided - Due date is valid format (YYYY-MM-DD) 2. If validation fails, ask user to provide missing data 3. If validation passes, call MCP tool 4. Handle tool errors gracefully ``` ## Response Handling ### Success Responses ```markdown Steps: 1. Call MCP tool 2. On success: - Extract relevant data from response - Format for user display - Provide confirmation message - Include relevant links or IDs ``` ### Error Responses ```markdown Steps: 1. Call MCP tool 2. On error: - Check error type (auth, rate limit, validation, etc.) - Provide helpful error message - Suggest remediation steps - Don't expose internal error details to user ``` ### Partial Success ```markdown Steps: 1. Batch operation with multiple MCP calls 2. Track successes and failures separately 3. 
Report summary: - "Successfully processed 8 of 10 items" - "Failed items: [item1, item2] due to [reason]" - Suggest retry or manual intervention ``` ## Performance Optimization ### Batching Requests **Good: Single query with filters** ```markdown Steps: 1. Call mcp__plugin_api_server__search with filters: - project_id: "123" - status: "active" - limit: 100 2. Process all results ``` **Avoid: Many individual queries** ```markdown Steps: 1. For each item ID: - Call mcp__plugin_api_server__get_item - Process item ``` ### Caching Results ```markdown Steps: 1. Call expensive MCP operation: mcp__plugin_api_server__analyze 2. Store results in variable for reuse 3. Use cached results for subsequent operations 4. Only re-fetch if data changes ``` ### Parallel Tool Calls When tools don't depend on each other, call in parallel: ```markdown Steps: 1. Make parallel calls (Claude handles this automatically): - mcp__plugin_api_server__get_project - mcp__plugin_api_server__get_users - mcp__plugin_api_server__get_tags 2. Wait for all to complete 3. Combine results ``` ## Integration Best Practices ### User Experience **Provide feedback:** ```markdown Steps: 1. Inform user: "Searching Asana tasks..." 2. Call mcp__plugin_asana_asana__asana_search_tasks 3. Show progress: "Found 15 tasks, analyzing..." 4. Present results ``` **Handle long operations:** ```markdown Steps: 1. Warn user: "This may take a minute..." 2. Break into smaller steps with updates 3. Show incremental progress 4. Final summary when complete ``` ### Error Messages **Good error messages:** ``` ❌ "Could not create task. Please check: 1. You're logged into Asana 2. You have access to workspace 'Engineering' 3. 
The project 'Q1 Goals' exists" ``` **Poor error messages:** ``` ❌ "Error: MCP tool returned 403" ``` ### Documentation **Document MCP tool usage in command:** ```markdown ## MCP Tools Used This command uses the following Asana MCP tools: - **asana_search_tasks**: Search for tasks matching criteria - **asana_create_task**: Create new task with details - **asana_update_task**: Update existing task properties Ensure you're authenticated to Asana before running this command. ``` ## Testing Tool Usage ### Local Testing 1. **Configure MCP server** in `.mcp.json` 2. **Install plugin locally** in `.claude-plugin/` 3. **Verify tools available** with `/mcp` 4. **Test command** that uses tools 5. **Check debug output**: `claude --debug` ### Test Scenarios **Test successful calls:** ```markdown Steps: 1. Create test data in external service 2. Run command that queries this data 3. Verify correct results returned ``` **Test error cases:** ```markdown Steps: 1. Test with missing authentication 2. Test with invalid parameters 3. Test with non-existent resources 4. Verify graceful error handling ``` **Test edge cases:** ```markdown Steps: 1. Test with empty results 2. Test with maximum results 3. Test with special characters 4. Test with concurrent access ``` ## Common Patterns ### Pattern: CRUD Operations ```markdown --- allowed-tools: [ "mcp__plugin_api_server__create_item", "mcp__plugin_api_server__read_item", "mcp__plugin_api_server__update_item", "mcp__plugin_api_server__delete_item" ] --- # Item Management ## Create Use create_item with required fields... ## Read Use read_item with item ID... ## Update Use update_item with item ID and changes... ## Delete Use delete_item with item ID (ask for confirmation first)... ``` ### Pattern: Search and Process ```markdown Steps: 1. **Search**: mcp__plugin_api_server__search with filters 2. **Filter**: Apply additional local filtering if needed 3. **Transform**: Process each result 4. 
**Present**: Format and display to user ``` ### Pattern: Multi-Step Workflow ```markdown Steps: 1. **Setup**: Gather all required information 2. **Validate**: Check data completeness 3. **Execute**: Chain of MCP tool calls: - Create parent resource - Create child resources - Link resources together - Add metadata 4. **Verify**: Confirm all steps succeeded 5. **Report**: Provide summary to user ``` ## Troubleshooting ### Tools Not Available **Check:** - MCP server configured correctly - Server connected (check `/mcp`) - Tool names match exactly (case-sensitive) - Restart Claude Code after config changes ### Tool Calls Failing **Check:** - Authentication is valid - Parameters match tool schema - Required parameters provided - Check `claude --debug` logs ### Performance Issues **Check:** - Batching queries instead of individual calls - Caching results when appropriate - Not making unnecessary tool calls - Parallel calls when possible ## Conclusion Effective MCP tool usage requires: 1. **Understanding tool schemas** via `/mcp` 2. **Pre-allowing tools** in commands appropriately 3. **Handling errors gracefully** 4. **Optimizing performance** with batching and caching 5. **Providing good UX** with feedback and clear errors 6. **Testing thoroughly** before deployment Follow these patterns for robust MCP tool integration in your plugin commands and agents. ================================================ FILE: plugins/plugin-dev/skills/plugin-settings/SKILL.md ================================================ --- name: Plugin Settings description: This skill should be used when the user asks about "plugin settings", "store plugin configuration", "user-configurable plugin", ".local.md files", "plugin state files", "read YAML frontmatter", "per-project plugin settings", or wants to make plugin behavior configurable. Documents the .claude/plugin-name.local.md pattern for storing plugin-specific configuration with YAML frontmatter and markdown content. 
version: 0.1.0 --- # Plugin Settings Pattern for Claude Code Plugins ## Overview Plugins can store user-configurable settings and state in `.claude/plugin-name.local.md` files within the project directory. This pattern uses YAML frontmatter for structured configuration and markdown content for prompts or additional context. **Key characteristics:** - File location: `.claude/plugin-name.local.md` in project root - Structure: YAML frontmatter + markdown body - Purpose: Per-project plugin configuration and state - Usage: Read from hooks, commands, and agents - Lifecycle: User-managed (not in git, should be in `.gitignore`) ## File Structure ### Basic Template ```markdown --- enabled: true setting1: value1 setting2: value2 numeric_setting: 42 list_setting: ["item1", "item2"] --- # Additional Context This markdown body can contain: - Task descriptions - Additional instructions - Prompts to feed back to Claude - Documentation or notes ``` ### Example: Plugin State File **.claude/my-plugin.local.md:** ```markdown --- enabled: true strict_mode: false max_retries: 3 notification_level: info coordinator_session: team-leader --- # Plugin Configuration This plugin is configured for standard validation mode. Contact @team-lead with questions. ``` ## Reading Settings Files ### From Hooks (Bash Scripts) **Pattern: Check existence and parse frontmatter** ```bash #!/bin/bash set -euo pipefail # Define state file path STATE_FILE=".claude/my-plugin.local.md" # Quick exit if file doesn't exist if [[ ! 
-f "$STATE_FILE" ]]; then exit 0 # Plugin not configured, skip fi # Parse YAML frontmatter (between --- markers) FRONTMATTER=$(sed -n '/^---$/,/^---$/{ /^---$/d; p; }' "$STATE_FILE") # Extract individual fields ENABLED=$(echo "$FRONTMATTER" | grep '^enabled:' | sed 's/enabled: *//' | sed 's/^"\(.*\)"$/\1/') STRICT_MODE=$(echo "$FRONTMATTER" | grep '^strict_mode:' | sed 's/strict_mode: *//' | sed 's/^"\(.*\)"$/\1/') # Check if enabled if [[ "$ENABLED" != "true" ]]; then exit 0 # Disabled fi # Use configuration in hook logic if [[ "$STRICT_MODE" == "true" ]]; then # Apply strict validation # ... fi ``` See `examples/read-settings-hook.sh` for complete working example. ### From Commands Commands can read settings files to customize behavior: ```markdown --- description: Process data with plugin allowed-tools: ["Read", "Bash"] --- # Process Command Steps: 1. Check if settings exist at `.claude/my-plugin.local.md` 2. Read configuration using Read tool 3. Parse YAML frontmatter to extract settings 4. Apply settings to processing logic 5. Execute with configured behavior ``` ### From Agents Agents can reference settings in their instructions: ```markdown --- name: configured-agent description: Agent that adapts to project settings --- Check for plugin settings at `.claude/my-plugin.local.md`. 
If present, parse YAML frontmatter and adapt behavior according to: - enabled: Whether plugin is active - mode: Processing mode (strict, standard, lenient) - Additional configuration fields ``` ## Parsing Techniques ### Extract Frontmatter ```bash # Extract everything between --- markers FRONTMATTER=$(sed -n '/^---$/,/^---$/{ /^---$/d; p; }' "$FILE") ``` ### Read Individual Fields **String fields:** ```bash VALUE=$(echo "$FRONTMATTER" | grep '^field_name:' | sed 's/field_name: *//' | sed 's/^"\(.*\)"$/\1/') ``` **Boolean fields:** ```bash ENABLED=$(echo "$FRONTMATTER" | grep '^enabled:' | sed 's/enabled: *//') # Compare: if [[ "$ENABLED" == "true" ]]; then ``` **Numeric fields:** ```bash MAX=$(echo "$FRONTMATTER" | grep '^max_value:' | sed 's/max_value: *//') # Use: if [[ $MAX -gt 100 ]]; then ``` ### Read Markdown Body Extract content after second `---`: ```bash # Get everything after closing --- BODY=$(awk '/^---$/{i++; next} i>=2' "$FILE") ``` ## Common Patterns ### Pattern 1: Temporarily Active Hooks Use settings file to control hook activation: ```bash #!/bin/bash STATE_FILE=".claude/security-scan.local.md" # Quick exit if not configured if [[ ! -f "$STATE_FILE" ]]; then exit 0 fi # Read enabled flag FRONTMATTER=$(sed -n '/^---$/,/^---$/{ /^---$/d; p; }' "$STATE_FILE") ENABLED=$(echo "$FRONTMATTER" | grep '^enabled:' | sed 's/enabled: *//') if [[ "$ENABLED" != "true" ]]; then exit 0 # Disabled fi # Run hook logic # ... ``` **Use case:** Enable/disable hooks without editing hooks.json (requires restart). ### Pattern 2: Agent State Management Store agent-specific state and configuration: **.claude/multi-agent-swarm.local.md:** ```markdown --- agent_name: auth-agent task_number: 3.5 pr_number: 1234 coordinator_session: team-leader enabled: true dependencies: ["Task 3.4"] --- # Task Assignment Implement JWT authentication for the API. 
**Success Criteria:** - Authentication endpoints created - Tests passing - PR created and CI green ``` Read from hooks to coordinate agents: ```bash AGENT_NAME=$(echo "$FRONTMATTER" | grep '^agent_name:' | sed 's/agent_name: *//') COORDINATOR=$(echo "$FRONTMATTER" | grep '^coordinator_session:' | sed 's/coordinator_session: *//') # Send notification to coordinator tmux send-keys -t "$COORDINATOR" "Agent $AGENT_NAME completed task" Enter ``` ### Pattern 3: Configuration-Driven Behavior **.claude/my-plugin.local.md:** ```markdown --- validation_level: strict max_file_size: 1000000 allowed_extensions: [".js", ".ts", ".tsx"] enable_logging: true --- # Validation Configuration Strict mode enabled for this project. All writes validated against security policies. ``` Use in hooks or commands: ```bash LEVEL=$(echo "$FRONTMATTER" | grep '^validation_level:' | sed 's/validation_level: *//') case "$LEVEL" in strict) # Apply strict validation ;; standard) # Apply standard validation ;; lenient) # Apply lenient validation ;; esac ``` ## Creating Settings Files ### From Commands Commands can create settings files: ```markdown # Setup Command Steps: 1. Ask user for configuration preferences 2. Create `.claude/my-plugin.local.md` with YAML frontmatter 3. Set appropriate values based on user input 4. Inform user that settings are saved 5. Remind user to restart Claude Code for hooks to recognize changes ``` ### Template Generation Provide template in plugin README: ```markdown ## Configuration Create `.claude/my-plugin.local.md` in your project: \`\`\`markdown --- enabled: true mode: standard max_retries: 3 --- # Plugin Configuration Your settings are active. \`\`\` After creating or editing, restart Claude Code for changes to take effect. 
``` ## Best Practices ### File Naming ✅ **DO:** - Use `.claude/plugin-name.local.md` format - Match plugin name exactly - Use `.local.md` suffix for user-local files ❌ **DON'T:** - Use different directory (not `.claude/`) - Use inconsistent naming - Use `.md` without `.local` (might be committed) ### Gitignore Always add to `.gitignore`: ```gitignore .claude/*.local.md .claude/*.local.json ``` Document this in plugin README. ### Defaults Provide sensible defaults when settings file doesn't exist: ```bash if [[ ! -f "$STATE_FILE" ]]; then # Use defaults ENABLED=true MODE=standard else # Read from file # ... fi ``` ### Validation Validate settings values: ```bash MAX=$(echo "$FRONTMATTER" | grep '^max_value:' | sed 's/max_value: *//') # Validate numeric range if ! [[ "$MAX" =~ ^[0-9]+$ ]] || [[ $MAX -lt 1 ]] || [[ $MAX -gt 100 ]]; then echo "⚠️ Invalid max_value in settings (must be 1-100)" >&2 MAX=10 # Use default fi ``` ### Restart Requirement **Important:** Settings changes require Claude Code restart. Document in your README: ```markdown ## Changing Settings After editing `.claude/my-plugin.local.md`: 1. Save the file 2. Exit Claude Code 3. Restart: `claude` or `cc` 4. New settings will be loaded ``` Hooks cannot be hot-swapped within a session. 
## Security Considerations ### Sanitize User Input When writing settings files from user input: ```bash # Escape quotes in user input SAFE_VALUE=$(echo "$USER_INPUT" | sed 's/"/\\"/g') # Write to file cat > "$STATE_FILE" <<EOF --- field: "$SAFE_VALUE" --- EOF ``` ### Validate Paths Never trust path values read from settings: ```bash CUSTOM_PATH=$(echo "$FRONTMATTER" | grep '^custom_path:' | sed 's/custom_path: *//' | sed 's/^"\(.*\)"$/\1/') # Reject path traversal attempts if [[ "$CUSTOM_PATH" == *".."* ]]; then echo "Invalid path in settings" >&2 exit 2 fi ``` ### Permissions Settings files should be: - Readable by user only (`chmod 600`) - Not committed to git - Not shared between users ## Real-World Examples ### multi-agent-swarm Plugin **.claude/multi-agent-swarm.local.md:** ```markdown --- agent_name: auth-implementation task_number: 3.5 pr_number: 1234 coordinator_session: team-leader enabled: true dependencies: ["Task 3.4"] additional_instructions: Use JWT tokens, not sessions --- # Task: Implement Authentication Build JWT-based authentication for the REST API. Coordinate with auth-agent on shared types. ``` **Hook usage (agent-stop-notification.sh):** - Checks if file exists (line 15-18: quick exit if not) - Parses frontmatter to get coordinator_session, agent_name, enabled - Sends notifications to coordinator if enabled - Allows quick activation/deactivation via `enabled: true/false` ### ralph-wiggum Plugin **.claude/ralph-loop.local.md:** ```markdown --- iteration: 1 max_iterations: 10 completion_promise: "All tests passing and build successful" --- Fix all the linting errors in the project. Make sure tests pass after each fix. 
``` **Hook usage (stop-hook.sh):** - Checks if file exists (line 15-18: quick exit if not active) - Reads iteration count and max_iterations - Extracts completion_promise for loop termination - Reads body as the prompt to feed back - Updates iteration count on each loop ## Quick Reference ### File Location ``` project-root/ └── .claude/ └── plugin-name.local.md ``` ### Frontmatter Parsing ```bash # Extract frontmatter FRONTMATTER=$(sed -n '/^---$/,/^---$/{ /^---$/d; p; }' "$FILE") # Read field VALUE=$(echo "$FRONTMATTER" | grep '^field:' | sed 's/field: *//' | sed 's/^"\(.*\)"$/\1/') ``` ### Body Parsing ```bash # Extract body (after second ---) BODY=$(awk '/^---$/{i++; next} i>=2' "$FILE") ``` ### Quick Exit Pattern ```bash if [[ ! -f ".claude/my-plugin.local.md" ]]; then exit 0 # Not configured fi ``` ## Additional Resources ### Reference Files For detailed implementation patterns: - **`references/parsing-techniques.md`** - Complete guide to parsing YAML frontmatter and markdown bodies - **`references/real-world-examples.md`** - Deep dive into multi-agent-swarm and ralph-wiggum implementations ### Example Files Working examples in `examples/`: - **`read-settings-hook.sh`** - Hook that reads and uses settings - **`create-settings-command.md`** - Command that creates settings file - **`example-settings.md`** - Template settings file ### Utility Scripts Development tools in `scripts/`: - **`validate-settings.sh`** - Validate settings file structure - **`parse-frontmatter.sh`** - Extract frontmatter fields ## Implementation Workflow To add settings to a plugin: 1. Design settings schema (which fields, types, defaults) 2. Create template file in plugin documentation 3. Add gitignore entry for `.claude/*.local.md` 4. Implement settings parsing in hooks/commands 5. Use quick-exit pattern (check file exists, check enabled field) 6. Document settings in plugin README with template 7. 
Remind users that changes require Claude Code restart Focus on keeping settings simple and providing good defaults when settings file doesn't exist. ================================================ FILE: plugins/plugin-dev/skills/plugin-settings/examples/create-settings-command.md ================================================ --- description: "Create plugin settings file with user preferences" allowed-tools: ["Write", "AskUserQuestion"] --- # Create Plugin Settings This command helps users create a `.claude/my-plugin.local.md` settings file. ## Steps ### Step 1: Ask User for Preferences Use AskUserQuestion to gather configuration: ```json { "questions": [ { "question": "Enable plugin for this project?", "header": "Enable Plugin", "multiSelect": false, "options": [ { "label": "Yes", "description": "Plugin will be active" }, { "label": "No", "description": "Plugin will be disabled" } ] }, { "question": "Validation mode?", "header": "Mode", "multiSelect": false, "options": [ { "label": "Strict", "description": "Maximum validation and security checks" }, { "label": "Standard", "description": "Balanced validation (recommended)" }, { "label": "Lenient", "description": "Minimal validation only" } ] } ] } ``` ### Step 2: Parse Answers Extract answers from AskUserQuestion result: - answers["0"]: enabled (Yes/No) - answers["1"]: mode (Strict/Standard/Lenient) ### Step 3: Create Settings File Use Write tool to create `.claude/my-plugin.local.md`: ```markdown --- enabled: [true or false, from the Step 2 answer] validation_mode: [strict, standard, or lenient, from the Step 2 answer] max_file_size: 1000000 notify_on_errors: true --- # Plugin Configuration Your plugin is configured with the selected validation mode. To modify settings, edit this file and restart Claude Code. 
``` ### Step 4: Inform User Tell the user: - Settings file created at `.claude/my-plugin.local.md` - Current configuration summary - How to edit manually if needed - Reminder: Restart Claude Code for changes to take effect - Settings file is gitignored (won't be committed) ## Implementation Notes Always validate user input before writing: - Check mode is valid - Validate numeric fields are numbers - Ensure paths don't have traversal attempts - Sanitize any free-text fields ================================================ FILE: plugins/plugin-dev/skills/plugin-settings/examples/example-settings.md ================================================ # Example Plugin Settings File ## Template: Basic Configuration **.claude/my-plugin.local.md:** ```markdown --- enabled: true mode: standard --- # My Plugin Configuration Plugin is active in standard mode. ``` ## Template: Advanced Configuration **.claude/my-plugin.local.md:** ```markdown --- enabled: true strict_mode: false max_file_size: 1000000 allowed_extensions: [".js", ".ts", ".tsx"] enable_logging: true notification_level: info retry_attempts: 3 timeout_seconds: 60 custom_path: "/path/to/data" --- # My Plugin Advanced Configuration This project uses custom plugin configuration with: - Standard validation mode - 1MB file size limit - JavaScript/TypeScript files allowed - Info-level logging - 3 retry attempts ## Additional Notes Contact @team-lead with questions about this configuration. ``` ## Template: Agent State File **.claude/multi-agent-swarm.local.md:** ```markdown --- agent_name: database-implementation task_number: 4.2 pr_number: 5678 coordinator_session: team-leader enabled: true dependencies: ["Task 3.5", "Task 4.1"] additional_instructions: "Use PostgreSQL, not MySQL" --- # Task Assignment: Database Schema Implementation Implement the database schema for the new features module. 
## Requirements - Create migration files - Add indexes for performance - Write tests for constraints - Document schema in README ## Success Criteria - Migrations run successfully - All tests pass - PR created with CI green - Schema documented ## Coordination Depends on: - Task 3.5: API endpoint definitions - Task 4.1: Data model design Report status to coordinator session 'team-leader'. ``` ## Template: Feature Flag Pattern **.claude/experimental-features.local.md:** ```markdown --- enabled: true features: - ai_suggestions - auto_formatting - advanced_refactoring experimental_mode: false --- # Experimental Features Configuration Current enabled features: - AI-powered code suggestions - Automatic code formatting - Advanced refactoring tools Experimental mode is OFF (stable features only). ``` ## Usage in Hooks These templates can be read by hooks: ```bash # Check if plugin is configured if [[ ! -f ".claude/my-plugin.local.md" ]]; then exit 0 # Not configured, skip hook fi # Read settings FRONTMATTER=$(sed -n '/^---$/,/^---$/{ /^---$/d; p; }' ".claude/my-plugin.local.md") ENABLED=$(echo "$FRONTMATTER" | grep '^enabled:' | sed 's/enabled: *//') # Apply settings if [[ "$ENABLED" == "true" ]]; then # Hook is active # ... fi ``` ## Gitignore Always add to project `.gitignore`: ```gitignore # Plugin settings (user-local, not committed) .claude/*.local.md .claude/*.local.json ``` ## Editing Settings Users can edit settings files manually: ```bash # Edit settings vim .claude/my-plugin.local.md # Changes take effect after restart exit # Exit Claude Code claude # Restart ``` Changes require Claude Code restart - hooks can't be hot-swapped. 
================================================ FILE: plugins/plugin-dev/skills/plugin-settings/examples/read-settings-hook.sh ================================================ #!/bin/bash # Example hook that reads plugin settings from .claude/my-plugin.local.md # Demonstrates the complete pattern for settings-driven hook behavior set -euo pipefail # Define settings file path SETTINGS_FILE=".claude/my-plugin.local.md" # Quick exit if settings file doesn't exist if [[ ! -f "$SETTINGS_FILE" ]]; then # Plugin not configured - use defaults or skip exit 0 fi # Parse YAML frontmatter (everything between --- markers) FRONTMATTER=$(sed -n '/^---$/,/^---$/{ /^---$/d; p; }' "$SETTINGS_FILE") # Extract configuration fields ENABLED=$(echo "$FRONTMATTER" | grep '^enabled:' | sed 's/enabled: *//' | sed 's/^"\(.*\)"$/\1/') STRICT_MODE=$(echo "$FRONTMATTER" | grep '^strict_mode:' | sed 's/strict_mode: *//' | sed 's/^"\(.*\)"$/\1/') MAX_SIZE=$(echo "$FRONTMATTER" | grep '^max_file_size:' | sed 's/max_file_size: *//') # Quick exit if disabled if [[ "$ENABLED" != "true" ]]; then exit 0 fi # Read hook input input=$(cat) file_path=$(echo "$input" | jq -r '.tool_input.file_path // empty') # Apply configured validation if [[ "$STRICT_MODE" == "true" ]]; then # Strict mode: apply all checks if [[ "$file_path" == *".."* ]]; then echo '{"hookSpecificOutput": {"permissionDecision": "deny"}, "systemMessage": "Path traversal blocked (strict mode)"}' >&2 exit 2 fi if [[ "$file_path" == *".env"* ]] || [[ "$file_path" == *"secret"* ]]; then echo '{"hookSpecificOutput": {"permissionDecision": "deny"}, "systemMessage": "Sensitive file blocked (strict mode)"}' >&2 exit 2 fi else # Standard mode: basic checks only if [[ "$file_path" == "/etc/"* ]] || [[ "$file_path" == "/sys/"* ]]; then echo '{"hookSpecificOutput": {"permissionDecision": "deny"}, "systemMessage": "System path blocked"}' >&2 exit 2 fi fi # Check file size if configured if [[ -n "$MAX_SIZE" ]] && [[ "$MAX_SIZE" =~ ^[0-9]+$ ]]; then 
content=$(echo "$input" | jq -r '.tool_input.content // empty') content_size=${#content} if [[ $content_size -gt $MAX_SIZE ]]; then echo '{"hookSpecificOutput": {"permissionDecision": "deny"}, "systemMessage": "File exceeds configured max size: '"$MAX_SIZE"' bytes"}' >&2 exit 2 fi fi # All checks passed exit 0 ================================================ FILE: plugins/plugin-dev/skills/plugin-settings/references/parsing-techniques.md ================================================ # Settings File Parsing Techniques Complete guide to parsing `.claude/plugin-name.local.md` files in bash scripts. ## File Structure Settings files use markdown with YAML frontmatter: ```markdown --- field1: value1 field2: "value with spaces" numeric_field: 42 boolean_field: true list_field: ["item1", "item2", "item3"] --- # Markdown Content This body content can be extracted separately. It's useful for prompts, documentation, or additional context. ``` ## Parsing Frontmatter ### Extract Frontmatter Block ```bash #!/bin/bash FILE=".claude/my-plugin.local.md" # Extract everything between --- markers (excluding the markers themselves) FRONTMATTER=$(sed -n '/^---$/,/^---$/{ /^---$/d; p; }' "$FILE") ``` **How it works:** - `sed -n` - Suppress automatic printing - `/^---$/,/^---$/` - Range from first `---` to second `---` - `{ /^---$/d; p; }` - Delete the `---` lines, print everything else ### Extract Individual Fields **String fields:** ```bash # Simple value VALUE=$(echo "$FRONTMATTER" | grep '^field_name:' | sed 's/field_name: *//') # Quoted value (removes surrounding quotes) VALUE=$(echo "$FRONTMATTER" | grep '^field_name:' | sed 's/field_name: *//' | sed 's/^"\(.*\)"$/\1/') ``` **Boolean fields:** ```bash ENABLED=$(echo "$FRONTMATTER" | grep '^enabled:' | sed 's/enabled: *//') # Use in condition if [[ "$ENABLED" == "true" ]]; then # Enabled fi ``` **Numeric fields:** ```bash MAX=$(echo "$FRONTMATTER" | grep '^max_value:' | sed 's/max_value: *//') # Validate it's a number if [[ "$MAX" 
=~ ^[0-9]+$ ]]; then # Use in numeric comparison if [[ $MAX -gt 100 ]]; then # Too large fi fi ``` **List fields (simple):** ```bash # YAML: list: ["item1", "item2", "item3"] LIST=$(echo "$FRONTMATTER" | grep '^list:' | sed 's/list: *//') # Result: ["item1", "item2", "item3"] # For simple checks: if [[ "$LIST" == *"item1"* ]]; then # List contains item1 fi ``` **List fields (proper parsing with jq):** ```bash # For proper list handling, use yq or convert to JSON # This requires yq to be installed (brew install yq) # Extract list as JSON array LIST=$(echo "$FRONTMATTER" | yq -o json '.list' 2>/dev/null) # Iterate over items echo "$LIST" | jq -r '.[]' | while read -r item; do echo "Processing: $item" done ``` ## Parsing Markdown Body ### Extract Body Content ```bash #!/bin/bash FILE=".claude/my-plugin.local.md" # Extract everything after the closing --- # Counts --- markers: first is opening, second is closing, everything after is body BODY=$(awk '/^---$/{i++; next} i>=2' "$FILE") ``` **How it works:** - `/^---$/` - Match `---` lines - `{i++; next}` - Increment counter and skip the `---` line - `i>=2` - Print all lines after second `---` **Handles edge case:** If `---` appears in the markdown body, it still works because we only count the first two `---` at the start. ### Use Body as Prompt ```bash # Extract body PROMPT=$(awk '/^---$/{i++; next} i>=2' "$RALPH_STATE_FILE") # Feed back to Claude echo '{"decision": "block", "reason": "'"$PROMPT"'"}' | jq . 
``` **Important:** Use `jq -n --arg` for safer JSON construction with user content: ```bash PROMPT=$(awk '/^---$/{i++; next} i>=2' "$FILE") # Safe JSON construction jq -n --arg prompt "$PROMPT" '{ "decision": "block", "reason": $prompt }' ``` ## Common Parsing Patterns ### Pattern: Field with Default ```bash VALUE=$(echo "$FRONTMATTER" | grep '^field:' | sed 's/field: *//' | sed 's/^"\(.*\)"$/\1/') # Use default if empty if [[ -z "$VALUE" ]]; then VALUE="default_value" fi ``` ### Pattern: Optional Field ```bash OPTIONAL=$(echo "$FRONTMATTER" | grep '^optional_field:' | sed 's/optional_field: *//' | sed 's/^"\(.*\)"$/\1/') # Only use if present if [[ -n "$OPTIONAL" ]] && [[ "$OPTIONAL" != "null" ]]; then # Field is set, use it echo "Optional field: $OPTIONAL" fi ``` ### Pattern: Multiple Fields at Once ```bash # Parse all fields in one pass while IFS=': ' read -r key value; do # Remove quotes if present value=$(echo "$value" | sed 's/^"\(.*\)"$/\1/') case "$key" in enabled) ENABLED="$value" ;; mode) MODE="$value" ;; max_size) MAX_SIZE="$value" ;; esac done <<< "$FRONTMATTER" ``` ## Updating Settings Files ### Atomic Updates Always use temp file + atomic move to prevent corruption: ```bash #!/bin/bash FILE=".claude/my-plugin.local.md" NEW_VALUE="updated_value" # Create temp file TEMP_FILE="${FILE}.tmp.$$" # Update field using sed sed "s/^field_name: .*/field_name: $NEW_VALUE/" "$FILE" > "$TEMP_FILE" # Atomic replace mv "$TEMP_FILE" "$FILE" ``` ### Update Single Field ```bash # Increment iteration counter CURRENT=$(echo "$FRONTMATTER" | grep '^iteration:' | sed 's/iteration: *//') NEXT=$((CURRENT + 1)) # Update file TEMP_FILE="${FILE}.tmp.$$" sed "s/^iteration: .*/iteration: $NEXT/" "$FILE" > "$TEMP_FILE" mv "$TEMP_FILE" "$FILE" ``` ### Update Multiple Fields ```bash # Update several fields at once TEMP_FILE="${FILE}.tmp.$$" sed -e "s/^iteration: .*/iteration: $NEXT_ITERATION/" \ -e "s/^pr_number: .*/pr_number: $PR_NUMBER/" \ -e "s/^status: .*/status: $NEW_STATUS/" \ 
"$FILE" > "$TEMP_FILE" mv "$TEMP_FILE" "$FILE" ``` ## Validation Techniques ### Validate File Exists and Is Readable ```bash FILE=".claude/my-plugin.local.md" if [[ ! -f "$FILE" ]]; then echo "Settings file not found" >&2 exit 1 fi if [[ ! -r "$FILE" ]]; then echo "Settings file not readable" >&2 exit 1 fi ``` ### Validate Frontmatter Structure ```bash # Count --- markers (should be exactly 2 at start) MARKER_COUNT=$(grep -c '^---$' "$FILE" 2>/dev/null || echo "0") if [[ $MARKER_COUNT -lt 2 ]]; then echo "Invalid settings file: missing frontmatter markers" >&2 exit 1 fi ``` ### Validate Field Values ```bash MODE=$(echo "$FRONTMATTER" | grep '^mode:' | sed 's/mode: *//') case "$MODE" in strict|standard|lenient) # Valid mode ;; *) echo "Invalid mode: $MODE (must be strict, standard, or lenient)" >&2 exit 1 ;; esac ``` ### Validate Numeric Ranges ```bash MAX_SIZE=$(echo "$FRONTMATTER" | grep '^max_size:' | sed 's/max_size: *//') if ! [[ "$MAX_SIZE" =~ ^[0-9]+$ ]]; then echo "max_size must be a number" >&2 exit 1 fi if [[ $MAX_SIZE -lt 1 ]] || [[ $MAX_SIZE -gt 10000000 ]]; then echo "max_size out of range (1-10000000)" >&2 exit 1 fi ``` ## Edge Cases and Gotchas ### Quotes in Values YAML allows both quoted and unquoted strings: ```yaml # These are equivalent: field1: value field2: "value" field3: 'value' ``` **Handle both:** ```bash # Remove surrounding quotes if present VALUE=$(echo "$FRONTMATTER" | grep '^field:' | sed 's/field: *//' | sed 's/^"\(.*\)"$/\1/' | sed "s/^'\\(.*\\)'$/\\1/") ``` ### --- in Markdown Body If the markdown body contains `---`, the parsing still works because we only match the first two: ```markdown --- field: value --- # Body Here's a separator: --- More content after the separator. ``` The `awk '/^---$/{i++; next} i>=2'` pattern handles this correctly. 
### Empty Values Handle missing or empty fields: ```yaml field1: field2: "" field3: null ``` **Parsing:** ```bash VALUE=$(echo "$FRONTMATTER" | grep '^field1:' | sed 's/field1: *//') # VALUE will be empty string # Check for empty/null if [[ -z "$VALUE" ]] || [[ "$VALUE" == "null" ]]; then VALUE="default" fi ``` ### Special Characters Values with special characters need careful handling: ```yaml message: "Error: Something went wrong!" path: "/path/with spaces/file.txt" regex: "^[a-zA-Z0-9_]+$" ``` **Safe parsing:** ```bash # Always quote variables when using MESSAGE=$(echo "$FRONTMATTER" | grep '^message:' | sed 's/message: *//' | sed 's/^"\(.*\)"$/\1/') echo "Message: $MESSAGE" # Quoted! ``` ## Performance Optimization ### Cache Parsed Values If reading settings multiple times: ```bash # Parse once FRONTMATTER=$(sed -n '/^---$/,/^---$/{ /^---$/d; p; }' "$FILE") # Extract multiple fields from cached frontmatter FIELD1=$(echo "$FRONTMATTER" | grep '^field1:' | sed 's/field1: *//') FIELD2=$(echo "$FRONTMATTER" | grep '^field2:' | sed 's/field2: *//') FIELD3=$(echo "$FRONTMATTER" | grep '^field3:' | sed 's/field3: *//') ``` **Don't:** Re-parse file for each field. ### Lazy Loading Only parse settings when needed: ```bash #!/bin/bash input=$(cat) # Quick checks first (no file I/O) tool_name=$(echo "$input" | jq -r '.tool_name') if [[ "$tool_name" != "Write" ]]; then exit 0 # Not a write operation, skip fi # Only now check settings file if [[ -f ".claude/my-plugin.local.md" ]]; then # Parse settings # ... 
fi ``` ## Debugging ### Print Parsed Values ```bash #!/bin/bash set -x # Enable debug tracing FILE=".claude/my-plugin.local.md" if [[ -f "$FILE" ]]; then echo "Settings file found" >&2 FRONTMATTER=$(sed -n '/^---$/,/^---$/{ /^---$/d; p; }' "$FILE") echo "Frontmatter:" >&2 echo "$FRONTMATTER" >&2 ENABLED=$(echo "$FRONTMATTER" | grep '^enabled:' | sed 's/enabled: *//') echo "Enabled: $ENABLED" >&2 fi ``` ### Validate Parsing ```bash # Show what was parsed echo "Parsed values:" >&2 echo " enabled: $ENABLED" >&2 echo " mode: $MODE" >&2 echo " max_size: $MAX_SIZE" >&2 # Verify expected values if [[ "$ENABLED" != "true" ]] && [[ "$ENABLED" != "false" ]]; then echo "⚠️ Unexpected enabled value: $ENABLED" >&2 fi ``` ## Alternative: Using yq For complex YAML, consider using `yq`: ```bash # Install: brew install yq # Parse YAML properly FRONTMATTER=$(sed -n '/^---$/,/^---$/{ /^---$/d; p; }' "$FILE") # Extract fields with yq ENABLED=$(echo "$FRONTMATTER" | yq '.enabled') MODE=$(echo "$FRONTMATTER" | yq '.mode') LIST=$(echo "$FRONTMATTER" | yq -o json '.list_field') # Iterate list properly echo "$LIST" | jq -r '.[]' | while read -r item; do echo "Item: $item" done ``` **Pros:** - Proper YAML parsing - Handles complex structures - Better list/object support **Cons:** - Requires yq installation - Additional dependency - May not be available on all systems **Recommendation:** Use sed/grep for simple fields, yq for complex structures. ## Complete Example ```bash #!/bin/bash set -euo pipefail # Configuration SETTINGS_FILE=".claude/my-plugin.local.md" # Quick exit if not configured if [[ ! 
-f "$SETTINGS_FILE" ]]; then # Use defaults ENABLED=true MODE=standard MAX_SIZE=1000000 else # Parse frontmatter FRONTMATTER=$(sed -n '/^---$/,/^---$/{ /^---$/d; p; }' "$SETTINGS_FILE") # Extract fields with defaults ENABLED=$(echo "$FRONTMATTER" | grep '^enabled:' | sed 's/enabled: *//') ENABLED=${ENABLED:-true} MODE=$(echo "$FRONTMATTER" | grep '^mode:' | sed 's/mode: *//' | sed 's/^"\(.*\)"$/\1/') MODE=${MODE:-standard} MAX_SIZE=$(echo "$FRONTMATTER" | grep '^max_size:' | sed 's/max_size: *//') MAX_SIZE=${MAX_SIZE:-1000000} # Validate values if [[ "$ENABLED" != "true" ]] && [[ "$ENABLED" != "false" ]]; then echo "⚠️ Invalid enabled value, using default" >&2 ENABLED=true fi if ! [[ "$MAX_SIZE" =~ ^[0-9]+$ ]]; then echo "⚠️ Invalid max_size, using default" >&2 MAX_SIZE=1000000 fi fi # Quick exit if disabled if [[ "$ENABLED" != "true" ]]; then exit 0 fi # Use configuration echo "Configuration loaded: mode=$MODE, max_size=$MAX_SIZE" >&2 # Apply logic based on settings case "$MODE" in strict) # Strict validation ;; standard) # Standard validation ;; lenient) # Lenient validation ;; esac ``` This provides robust settings handling with defaults, validation, and error recovery. ================================================ FILE: plugins/plugin-dev/skills/plugin-settings/references/real-world-examples.md ================================================ # Real-World Plugin Settings Examples Detailed analysis of how production plugins use the `.claude/plugin-name.local.md` pattern. ## multi-agent-swarm Plugin ### Settings File Structure **.claude/multi-agent-swarm.local.md:** ```markdown --- agent_name: auth-implementation task_number: 3.5 pr_number: 1234 coordinator_session: team-leader enabled: true dependencies: ["Task 3.4"] additional_instructions: "Use JWT tokens, not sessions" --- # Task: Implement Authentication Build JWT-based authentication for the REST API. 
## Requirements - JWT token generation and validation - Refresh token flow - Secure password hashing ## Success Criteria - Auth endpoints implemented - Tests passing (100% coverage) - PR created and CI green - Documentation updated ## Coordination Depends on Task 3.4 (user model). Report status to 'team-leader' session. ``` ### How It's Used **File:** `hooks/agent-stop-notification.sh` **Purpose:** Send notifications to coordinator when agent becomes idle **Implementation:** ```bash #!/bin/bash set -euo pipefail SWARM_STATE_FILE=".claude/multi-agent-swarm.local.md" # Quick exit if no swarm active if [[ ! -f "$SWARM_STATE_FILE" ]]; then exit 0 fi # Parse frontmatter FRONTMATTER=$(sed -n '/^---$/,/^---$/{ /^---$/d; p; }' "$SWARM_STATE_FILE") # Extract configuration COORDINATOR_SESSION=$(echo "$FRONTMATTER" | grep '^coordinator_session:' | sed 's/coordinator_session: *//' | sed 's/^"\(.*\)"$/\1/') AGENT_NAME=$(echo "$FRONTMATTER" | grep '^agent_name:' | sed 's/agent_name: *//' | sed 's/^"\(.*\)"$/\1/') TASK_NUMBER=$(echo "$FRONTMATTER" | grep '^task_number:' | sed 's/task_number: *//' | sed 's/^"\(.*\)"$/\1/') PR_NUMBER=$(echo "$FRONTMATTER" | grep '^pr_number:' | sed 's/pr_number: *//' | sed 's/^"\(.*\)"$/\1/') ENABLED=$(echo "$FRONTMATTER" | grep '^enabled:' | sed 's/enabled: *//') # Check if enabled if [[ "$ENABLED" != "true" ]]; then exit 0 fi # Send notification to coordinator NOTIFICATION="🤖 Agent ${AGENT_NAME} (Task ${TASK_NUMBER}, PR #${PR_NUMBER}) is idle." if tmux has-session -t "$COORDINATOR_SESSION" 2>/dev/null; then tmux send-keys -t "$COORDINATOR_SESSION" "$NOTIFICATION" Enter sleep 0.5 tmux send-keys -t "$COORDINATOR_SESSION" Enter fi exit 0 ``` **Key patterns:** 1. **Quick exit** (line 7-9): Returns immediately if file doesn't exist 2. **Field extraction** (lines 11-17): Parses each frontmatter field 3. **Enabled check** (lines 19-21): Respects enabled flag 4. 
**Action based on settings** (lines 23-29): Uses coordinator_session to send notification ### Creation **File:** `commands/launch-swarm.md` Settings files are created during swarm launch with: ```bash cat > "$WORKTREE_PATH/.claude/multi-agent-swarm.local.md" <<EOF
---
agent_name: ${AGENT_NAME}
task_number: ${TASK_NUMBER}
pr_number: ${PR_NUMBER}
coordinator_session: ${COORDINATOR_SESSION}
enabled: true
---
${TASK_DESCRIPTION}
EOF
``` To make the write atomic, write to a temporary file first and rename: ```bash cat > temp.md <<EOF
...
EOF
mv temp.md ".claude/multi-agent-swarm.local.md" ``` ## ralph-wiggum Plugin ### Settings File Structure **.claude/ralph-loop.local.md:** ```markdown --- iteration: 1 max_iterations: 10 completion_promise: "All tests passing and build successful" started_at: "2025-01-15T14:30:00Z" --- Fix all the linting errors in the project. Make sure tests pass after each fix. Document any changes needed in CLAUDE.md. ``` ### How It's Used **File:** `hooks/stop-hook.sh` **Purpose:** Prevent session exit and loop Claude's output back as input **Implementation:** ```bash #!/bin/bash set -euo pipefail RALPH_STATE_FILE=".claude/ralph-loop.local.md" # Quick exit if no active loop if [[ ! -f "$RALPH_STATE_FILE" ]]; then exit 0 fi # Parse frontmatter FRONTMATTER=$(sed -n '/^---$/,/^---$/{ /^---$/d; p; }' "$RALPH_STATE_FILE") # Extract configuration ITERATION=$(echo "$FRONTMATTER" | grep '^iteration:' | sed 's/iteration: *//') MAX_ITERATIONS=$(echo "$FRONTMATTER" | grep '^max_iterations:' | sed 's/max_iterations: *//') COMPLETION_PROMISE=$(echo "$FRONTMATTER" | grep '^completion_promise:' | sed 's/completion_promise: *//' | sed 's/^"\(.*\)"$/\1/') # Check max iterations if [[ $MAX_ITERATIONS -gt 0 ]] && [[ $ITERATION -ge $MAX_ITERATIONS ]]; then echo "🛑 Ralph loop: Max iterations ($MAX_ITERATIONS) reached."
rm "$RALPH_STATE_FILE" exit 0 fi # Get transcript and check for completion promise TRANSCRIPT_PATH=$(echo "$HOOK_INPUT" | jq -r '.transcript_path') LAST_OUTPUT=$(grep '"role":"assistant"' "$TRANSCRIPT_PATH" | tail -1 | jq -r '.message.content | map(select(.type == "text")) | map(.text) | join("\n")') # Check for completion if [[ "$COMPLETION_PROMISE" != "null" ]] && [[ -n "$COMPLETION_PROMISE" ]]; then PROMISE_TEXT=$(echo "$LAST_OUTPUT" | perl -0777 -pe 's/.*?<promise>(.*?)<\/promise>.*/$1/s; s/^\s+|\s+$//g') if [[ "$PROMISE_TEXT" = "$COMPLETION_PROMISE" ]]; then echo "✅ Ralph loop: Detected completion" rm "$RALPH_STATE_FILE" exit 0 fi fi # Continue loop - increment iteration NEXT_ITERATION=$((ITERATION + 1)) # Extract prompt from markdown body PROMPT_TEXT=$(awk '/^---$/{i++; next} i>=2' "$RALPH_STATE_FILE") # Update iteration counter TEMP_FILE="${RALPH_STATE_FILE}.tmp.$$" sed "s/^iteration: .*/iteration: $NEXT_ITERATION/" "$RALPH_STATE_FILE" > "$TEMP_FILE" mv "$TEMP_FILE" "$RALPH_STATE_FILE" # Block exit and feed prompt back jq -n \ --arg prompt "$PROMPT_TEXT" \ --arg msg "🔄 Ralph iteration $NEXT_ITERATION" \ '{ "decision": "block", "reason": $prompt, "systemMessage": $msg }' exit 0 ``` **Key patterns:** 1. **Quick exit** (lines 7-9): Skip if not active 2. **Iteration tracking** (lines 11-20): Count and enforce max iterations 3. **Promise detection** (lines 25-33): Check for completion signal in output 4. **Prompt extraction** (line 38): Read markdown body as next prompt 5. **State update** (lines 40-43): Increment iteration atomically 6. **Loop continuation** (lines 45-53): Block exit and feed prompt back ### Creation **File:** `scripts/setup-ralph-loop.sh` ```bash #!/bin/bash PROMPT="$1" MAX_ITERATIONS="${2:-0}" COMPLETION_PROMISE="${3:-}" # Create state file cat > ".claude/ralph-loop.local.md" <<EOF
---
iteration: 1
max_iterations: ${MAX_ITERATIONS}
completion_promise: "${COMPLETION_PROMISE}"
started_at: "$(date -u +%Y-%m-%dT%H:%M:%SZ)"
---
${PROMPT}
EOF
``` ## Common Patterns Across Plugins ### 3. Atomic State Updates Both plugins update the settings file atomically via a temp file: ```bash TEMP_FILE="${FILE}.tmp.$$" sed "s/^field: .*/field: $NEW_VALUE/" "$FILE" > "$TEMP_FILE" mv "$TEMP_FILE" "$FILE" ``` **Why:** Prevents corruption if process is interrupted. ### 4.
Quote Handling Both strip surrounding quotes from YAML values: ```bash sed 's/^"\(.*\)"$/\1/' ``` **Why:** YAML allows both `field: value` and `field: "value"`. ### 5. Error Handling Both handle missing/corrupt files gracefully: ```bash if [[ ! -f "$FILE" ]]; then exit 0 # No error, just not configured fi if [[ -z "$CRITICAL_FIELD" ]]; then echo "Settings file corrupt" >&2 rm "$FILE" # Clean up exit 0 fi ``` **Why:** Fails gracefully instead of crashing. ## Anti-Patterns to Avoid ### ❌ Hardcoded Paths ```bash # BAD FILE="/Users/alice/.claude/my-plugin.local.md" # GOOD FILE=".claude/my-plugin.local.md" ``` ### ❌ Unquoted Variables ```bash # BAD echo $VALUE # GOOD echo "$VALUE" ``` ### ❌ Non-Atomic Updates ```bash # BAD: Can corrupt file if interrupted sed -i "s/field: .*/field: $VALUE/" "$FILE" # GOOD: Atomic TEMP_FILE="${FILE}.tmp.$$" sed "s/field: .*/field: $VALUE/" "$FILE" > "$TEMP_FILE" mv "$TEMP_FILE" "$FILE" ``` ### ❌ No Default Values ```bash # BAD: Fails if field missing if [[ $MAX -gt 100 ]]; then # MAX might be empty! fi # GOOD: Provide default MAX=${MAX:-10} ``` ### ❌ Ignoring Edge Cases ```bash # BAD: Assumes exactly 2 --- markers sed -n '/^---$/,/^---$/{ /^---$/d; p; }' # GOOD: Handles --- in body awk '/^---$/{i++; next} i>=2' # For body ``` ## Conclusion The `.claude/plugin-name.local.md` pattern provides: - Simple, human-readable configuration - Version-control friendly (gitignored) - Per-project settings - Easy parsing with standard bash tools - Supports both structured config (YAML) and freeform content (markdown) Use this pattern for any plugin that needs user-configurable behavior or state persistence. 
================================================ FILE: plugins/plugin-dev/skills/plugin-settings/scripts/parse-frontmatter.sh ================================================ #!/bin/bash # Frontmatter Parser Utility # Extracts YAML frontmatter from .local.md files set -euo pipefail # Usage show_usage() { echo "Usage: $0 <settings-file> [field-name]" echo "" echo "Examples:" echo " # Show all frontmatter" echo " $0 .claude/my-plugin.local.md" echo "" echo " # Extract specific field" echo " $0 .claude/my-plugin.local.md enabled" echo "" echo " # Extract and use in script" echo " ENABLED=\$($0 .claude/my-plugin.local.md enabled)" exit 0 } if [ $# -eq 0 ] || [ "$1" = "-h" ] || [ "$1" = "--help" ]; then show_usage fi FILE="$1" FIELD="${2:-}" # Validate file if [ ! -f "$FILE" ]; then echo "Error: File not found: $FILE" >&2 exit 1 fi # Extract frontmatter FRONTMATTER=$(sed -n '/^---$/,/^---$/{ /^---$/d; p; }' "$FILE") if [ -z "$FRONTMATTER" ]; then echo "Error: No frontmatter found in $FILE" >&2 exit 1 fi # If no field specified, output all frontmatter if [ -z "$FIELD" ]; then echo "$FRONTMATTER" exit 0 fi # Extract specific field VALUE=$(echo "$FRONTMATTER" | grep "^${FIELD}:" | sed "s/${FIELD}: *//" | sed 's/^"\(.*\)"$/\1/' | sed "s/^'\\(.*\\)'$/\\1/") if [ -z "$VALUE" ]; then echo "Error: Field '$FIELD' not found in frontmatter" >&2 exit 1 fi echo "$VALUE" exit 0 ================================================ FILE: plugins/plugin-dev/skills/plugin-settings/scripts/validate-settings.sh ================================================ #!/bin/bash # Settings File Validator # Validates .claude/plugin-name.local.md structure set -euo pipefail # Usage if [ $# -eq 0 ]; then echo "Usage: $0 <settings-file>" echo "" echo "Validates plugin settings file for:" echo " - File existence and readability" echo " - YAML frontmatter structure" echo " - Required --- markers" echo " - Field format" echo "" echo "Example: $0 .claude/my-plugin.local.md" exit 1 fi SETTINGS_FILE="$1" echo "🔍 Validating settings file:
$SETTINGS_FILE" echo "" # Check 1: File exists if [ ! -f "$SETTINGS_FILE" ]; then echo "❌ File not found: $SETTINGS_FILE" exit 1 fi echo "✅ File exists" # Check 2: File is readable if [ ! -r "$SETTINGS_FILE" ]; then echo "❌ File is not readable" exit 1 fi echo "✅ File is readable" # Check 3: Has frontmatter markers (note: grep -c prints 0 AND exits 1 on no match, so use || true rather than || echo "0" to avoid a two-line value under set -e) MARKER_COUNT=$(grep -c '^---$' "$SETTINGS_FILE" 2>/dev/null || true) MARKER_COUNT=${MARKER_COUNT:-0} if [ "$MARKER_COUNT" -lt 2 ]; then echo "❌ Invalid frontmatter: found $MARKER_COUNT '---' markers (need at least 2)" echo " Expected format:" echo " ---" echo " field: value" echo " ---" echo " Content..." exit 1 fi echo "✅ Frontmatter markers present" # Check 4: Extract and validate frontmatter FRONTMATTER=$(sed -n '/^---$/,/^---$/{ /^---$/d; p; }' "$SETTINGS_FILE") if [ -z "$FRONTMATTER" ]; then echo "❌ Empty frontmatter (nothing between --- markers)" exit 1 fi echo "✅ Frontmatter not empty" # Check 5: Frontmatter has valid YAML-like structure if ! echo "$FRONTMATTER" | grep -q ':'; then echo "⚠️ Warning: Frontmatter has no key:value pairs" fi # Check 6: Look for common fields echo "" echo "Detected fields:" echo "$FRONTMATTER" | grep '^[a-z_][a-z0-9_]*:' | while IFS=':' read -r key value; do echo " - $key: ${value:0:50}" done # Check 7: Validate common boolean fields for field in enabled strict_mode; do VALUE=$(echo "$FRONTMATTER" | grep "^${field}:" | sed "s/${field}: *//" || true) if [ -n "$VALUE" ]; then if [ "$VALUE" != "true" ] && [ "$VALUE" != "false" ]; then echo "⚠️ Field '$field' should be boolean (true/false), got: $VALUE" fi fi done # Check 8: Check body exists BODY=$(awk '/^---$/{i++; next} i>=2' "$SETTINGS_FILE") echo "" if [ -n "$BODY" ]; then BODY_LINES=$(echo "$BODY" | wc -l | tr -d ' ') echo "✅ Markdown body present ($BODY_LINES lines)" else echo "⚠️ No markdown body (frontmatter only)" fi echo "" echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" echo "✅ Settings file structure is valid" echo "" echo "Reminder: Changes to this file require restarting Claude Code"
exit 0 ================================================ FILE: plugins/plugin-dev/skills/plugin-structure/README.md ================================================ # Plugin Structure Skill Comprehensive guidance on Claude Code plugin architecture, directory layout, and best practices. ## Overview This skill provides detailed knowledge about: - Plugin directory structure and organization - `plugin.json` manifest configuration - Component organization (commands, agents, skills, hooks) - Auto-discovery mechanisms - Portable path references with `${CLAUDE_PLUGIN_ROOT}` - File naming conventions ## Skill Structure ### SKILL.md (1,619 words) Core skill content covering: - Directory structure overview - Plugin manifest (plugin.json) fields - Component organization patterns - ${CLAUDE_PLUGIN_ROOT} usage - File naming conventions - Auto-discovery mechanism - Best practices - Common patterns - Troubleshooting ### References Detailed documentation for deep dives: - **manifest-reference.md**: Complete `plugin.json` field reference - All field descriptions and examples - Path resolution rules - Validation guidelines - Minimal vs. 
complete manifest examples - **component-patterns.md**: Advanced organization patterns - Component lifecycle (discovery, activation) - Command organization patterns - Agent organization patterns - Skill organization patterns - Hook organization patterns - Script organization patterns - Cross-component patterns - Best practices for scalability ### Examples Three complete plugin examples: - **minimal-plugin.md**: Simplest possible plugin - Single command - Minimal manifest - When to use this pattern - **standard-plugin.md**: Well-structured production plugin - Multiple components (commands, agents, skills, hooks) - Complete manifest with metadata - Rich skill structure - Integration between components - **advanced-plugin.md**: Enterprise-grade plugin - Multi-level organization - MCP server integration - Shared libraries - Configuration management - Security automation - Monitoring integration ## When This Skill Triggers Claude Code activates this skill when users: - Ask to "create a plugin" or "scaffold a plugin" - Need to "understand plugin structure" - Want to "organize plugin components" - Need to "set up plugin.json" - Ask about "${CLAUDE_PLUGIN_ROOT}" usage - Want to "add commands/agents/skills/hooks" - Need "configure auto-discovery" help - Ask about plugin architecture or best practices ## Progressive Disclosure The skill uses progressive disclosure to manage context: 1. **SKILL.md** (~1600 words): Core concepts and workflows 2. **References** (~6000 words): Detailed field references and patterns 3. **Examples** (~8000 words): Complete working examples Claude loads references and examples only as needed based on the task. ## Related Skills This skill works well with: - **hook-development**: For creating plugin hooks - **mcp-integration**: For integrating MCP servers (when available) - **marketplace-publishing**: For publishing plugins (when available) ## Maintenance To update this skill: 1. Keep SKILL.md lean and focused on core concepts 2. 
Move detailed information to references/ 3. Add new examples/ for common patterns 4. Update version in SKILL.md frontmatter 5. Ensure all documentation uses imperative/infinitive form ================================================ FILE: plugins/plugin-dev/skills/plugin-structure/SKILL.md ================================================ --- name: Plugin Structure description: This skill should be used when the user asks to "create a plugin", "scaffold a plugin", "understand plugin structure", "organize plugin components", "set up plugin.json", "use ${CLAUDE_PLUGIN_ROOT}", "add commands/agents/skills/hooks", "configure auto-discovery", or needs guidance on plugin directory layout, manifest configuration, component organization, file naming conventions, or Claude Code plugin architecture best practices. version: 0.1.0 --- # Plugin Structure for Claude Code ## Overview Claude Code plugins follow a standardized directory structure with automatic component discovery. Understanding this structure enables creating well-organized, maintainable plugins that integrate seamlessly with Claude Code. **Key concepts:** - Conventional directory layout for automatic discovery - Manifest-driven configuration in `.claude-plugin/plugin.json` - Component-based organization (commands, agents, skills, hooks) - Portable path references using `${CLAUDE_PLUGIN_ROOT}` - Explicit vs. auto-discovered component loading ## Directory Structure Every Claude Code plugin follows this organizational pattern: ``` plugin-name/ ├── .claude-plugin/ │ └── plugin.json # Required: Plugin manifest ├── commands/ # Slash commands (.md files) ├── agents/ # Subagent definitions (.md files) ├── skills/ # Agent skills (subdirectories) │ └── skill-name/ │ └── SKILL.md # Required for each skill ├── hooks/ │ └── hooks.json # Event handler configuration ├── .mcp.json # MCP server definitions └── scripts/ # Helper scripts and utilities ``` **Critical rules:** 1. 
**Manifest location**: The `plugin.json` manifest MUST be in `.claude-plugin/` directory 2. **Component locations**: All component directories (commands, agents, skills, hooks) MUST be at plugin root level, NOT nested inside `.claude-plugin/` 3. **Optional components**: Only create directories for components the plugin actually uses 4. **Naming convention**: Use kebab-case for all directory and file names ## Plugin Manifest (plugin.json) The manifest defines plugin metadata and configuration. Located at `.claude-plugin/plugin.json`: ### Required Fields ```json { "name": "plugin-name" } ``` **Name requirements:** - Use kebab-case format (lowercase with hyphens) - Must be unique across installed plugins - No spaces or special characters - Example: `code-review-assistant`, `test-runner`, `api-docs` ### Recommended Metadata ```json { "name": "plugin-name", "version": "1.0.0", "description": "Brief explanation of plugin purpose", "author": { "name": "Author Name", "email": "author@example.com", "url": "https://example.com" }, "homepage": "https://docs.example.com", "repository": "https://github.com/user/plugin-name", "license": "MIT", "keywords": ["testing", "automation", "ci-cd"] } ``` **Version format**: Follow semantic versioning (MAJOR.MINOR.PATCH) **Keywords**: Use for plugin discovery and categorization ### Component Path Configuration Specify custom paths for components (supplements default directories): ```json { "name": "plugin-name", "commands": "./custom-commands", "agents": ["./agents", "./specialized-agents"], "hooks": "./config/hooks.json", "mcpServers": "./.mcp.json" } ``` **Important**: Custom paths supplement defaults—they don't replace them. Components in both default directories and custom paths will load. 
**Path rules:** - Must be relative to plugin root - Must start with `./` - Cannot use absolute paths - Support arrays for multiple locations ## Component Organization ### Commands **Location**: `commands/` directory **Format**: Markdown files with YAML frontmatter **Auto-discovery**: All `.md` files in `commands/` load automatically **Example structure**: ``` commands/ ├── review.md # /review command ├── test.md # /test command └── deploy.md # /deploy command ``` **File format**: ```markdown --- name: command-name description: Command description --- Command implementation instructions... ``` **Usage**: Commands integrate as native slash commands in Claude Code ### Agents **Location**: `agents/` directory **Format**: Markdown files with YAML frontmatter **Auto-discovery**: All `.md` files in `agents/` load automatically **Example structure**: ``` agents/ ├── code-reviewer.md ├── test-generator.md └── refactorer.md ``` **File format**: ```markdown --- description: Agent role and expertise capabilities: - Specific task 1 - Specific task 2 --- Detailed agent instructions and knowledge... ``` **Usage**: Users can invoke agents manually, or Claude Code selects them automatically based on task context ### Skills **Location**: `skills/` directory with subdirectories per skill **Format**: Each skill in its own directory with `SKILL.md` file **Auto-discovery**: All `SKILL.md` files in skill subdirectories load automatically **Example structure**: ``` skills/ ├── api-testing/ │ ├── SKILL.md │ ├── scripts/ │ │ └── test-runner.py │ └── references/ │ └── api-spec.md └── database-migrations/ ├── SKILL.md └── examples/ └── migration-template.sql ``` **SKILL.md format**: ```markdown --- name: Skill Name description: When to use this skill version: 1.0.0 --- Skill instructions and guidance... 
``` **Supporting files**: Skills can include scripts, references, examples, or assets in subdirectories **Usage**: Claude Code autonomously activates skills based on task context matching the description ### Hooks **Location**: `hooks/hooks.json` or inline in `plugin.json` **Format**: JSON configuration defining event handlers **Registration**: Hooks register automatically when plugin enables **Example structure**: ``` hooks/ ├── hooks.json # Hook configuration └── scripts/ ├── validate.sh # Hook script └── check-style.sh # Hook script ``` **Configuration format**: ```json { "PreToolUse": [{ "matcher": "Write|Edit", "hooks": [{ "type": "command", "command": "bash ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/validate.sh", "timeout": 30 }] }] } ``` **Available events**: PreToolUse, PostToolUse, Stop, SubagentStop, SessionStart, SessionEnd, UserPromptSubmit, PreCompact, Notification **Usage**: Hooks execute automatically in response to Claude Code events ### MCP Servers **Location**: `.mcp.json` at plugin root or inline in `plugin.json` **Format**: JSON configuration for MCP server definitions **Auto-start**: Servers start automatically when plugin enables **Example format**: ```json { "mcpServers": { "server-name": { "command": "node", "args": ["${CLAUDE_PLUGIN_ROOT}/servers/server.js"], "env": { "API_KEY": "${API_KEY}" } } } } ``` **Usage**: MCP servers integrate seamlessly with Claude Code's tool system ## Portable Path References ### ${CLAUDE_PLUGIN_ROOT} Use `${CLAUDE_PLUGIN_ROOT}` environment variable for all intra-plugin path references: ```json { "command": "bash ${CLAUDE_PLUGIN_ROOT}/scripts/run.sh" } ``` **Why it matters**: Plugins install in different locations depending on: - User installation method (marketplace, local, npm) - Operating system conventions - User preferences **Where to use it**: - Hook command paths - MCP server command arguments - Script execution references - Resource file paths **Never use**: - Hardcoded absolute paths 
(`/Users/name/plugins/...`) - Relative paths from working directory (`./scripts/...` in commands) - Home directory shortcuts (`~/plugins/...`) ### Path Resolution Rules **In manifest JSON fields** (hooks, MCP servers): ```json "command": "${CLAUDE_PLUGIN_ROOT}/scripts/tool.sh" ``` **In component files** (commands, agents, skills): ```markdown Reference scripts at: ${CLAUDE_PLUGIN_ROOT}/scripts/helper.py ``` **In executed scripts**: ```bash #!/bin/bash # ${CLAUDE_PLUGIN_ROOT} available as environment variable source "${CLAUDE_PLUGIN_ROOT}/lib/common.sh" ``` ## File Naming Conventions ### Component Files **Commands**: Use kebab-case `.md` files - `code-review.md` → `/code-review` - `run-tests.md` → `/run-tests` - `api-docs.md` → `/api-docs` **Agents**: Use kebab-case `.md` files describing role - `test-generator.md` - `code-reviewer.md` - `performance-analyzer.md` **Skills**: Use kebab-case directory names - `api-testing/` - `database-migrations/` - `error-handling/` ### Supporting Files **Scripts**: Use descriptive kebab-case names with appropriate extensions - `validate-input.sh` - `generate-report.py` - `process-data.js` **Documentation**: Use kebab-case markdown files - `api-reference.md` - `migration-guide.md` - `best-practices.md` **Configuration**: Use standard names - `hooks.json` - `.mcp.json` - `plugin.json` ## Auto-Discovery Mechanism Claude Code automatically discovers and loads components: 1. **Plugin manifest**: Reads `.claude-plugin/plugin.json` when plugin enables 2. **Commands**: Scans `commands/` directory for `.md` files 3. **Agents**: Scans `agents/` directory for `.md` files 4. **Skills**: Scans `skills/` for subdirectories containing `SKILL.md` 5. **Hooks**: Loads configuration from `hooks/hooks.json` or manifest 6. 
**MCP servers**: Loads configuration from `.mcp.json` or manifest **Discovery timing**: - Plugin installation: Components register with Claude Code - Plugin enable: Components become available for use - No restart required: Changes take effect on next Claude Code session **Override behavior**: Custom paths in `plugin.json` supplement (not replace) default directories ## Best Practices ### Organization 1. **Logical grouping**: Group related components together - Put test-related commands, agents, and skills together - Create subdirectories in `scripts/` for different purposes 2. **Minimal manifest**: Keep `plugin.json` lean - Only specify custom paths when necessary - Rely on auto-discovery for standard layouts - Use inline configuration only for simple cases 3. **Documentation**: Include README files - Plugin root: Overall purpose and usage - Component directories: Specific guidance - Script directories: Usage and requirements ### Naming 1. **Consistency**: Use consistent naming across components - If command is `test-runner`, name related agent `test-runner-agent` - Match skill directory names to their purpose 2. **Clarity**: Use descriptive names that indicate purpose - Good: `api-integration-testing/`, `code-quality-checker.md` - Avoid: `utils/`, `misc.md`, `temp.sh` 3. **Length**: Balance brevity with clarity - Commands: 2-3 words (`review-pr`, `run-ci`) - Agents: Describe role clearly (`code-reviewer`, `test-generator`) - Skills: Topic-focused (`error-handling`, `api-design`) ### Portability 1. **Always use ${CLAUDE_PLUGIN_ROOT}**: Never hardcode paths 2. **Test on multiple systems**: Verify on macOS, Linux, Windows 3. **Document dependencies**: List required tools and versions 4. **Avoid system-specific features**: Use portable bash/Python constructs ### Maintenance 1. **Version consistently**: Update version in plugin.json for releases 2. **Deprecate gracefully**: Mark old components clearly before removal 3. 
**Document breaking changes**: Note changes affecting existing users 4. **Test thoroughly**: Verify all components work after changes ## Common Patterns ### Minimal Plugin Single command with no dependencies: ``` my-plugin/ ├── .claude-plugin/ │ └── plugin.json # Just name field └── commands/ └── hello.md # Single command ``` ### Full-Featured Plugin Complete plugin with all component types: ``` my-plugin/ ├── .claude-plugin/ │ └── plugin.json ├── commands/ # User-facing commands ├── agents/ # Specialized subagents ├── skills/ # Auto-activating skills ├── hooks/ # Event handlers │ ├── hooks.json │ └── scripts/ ├── .mcp.json # External integrations └── scripts/ # Shared utilities ``` ### Skill-Focused Plugin Plugin providing only skills: ``` my-plugin/ ├── .claude-plugin/ │ └── plugin.json └── skills/ ├── skill-one/ │ └── SKILL.md └── skill-two/ └── SKILL.md ``` ## Troubleshooting **Component not loading**: - Verify file is in correct directory with correct extension - Check YAML frontmatter syntax (commands, agents, skills) - Ensure skill has `SKILL.md` (not `README.md` or other name) - Confirm plugin is enabled in Claude Code settings **Path resolution errors**: - Replace all hardcoded paths with `${CLAUDE_PLUGIN_ROOT}` - Verify paths are relative and start with `./` in manifest - Check that referenced files exist at specified paths - Test with `echo $CLAUDE_PLUGIN_ROOT` in hook scripts **Auto-discovery not working**: - Confirm directories are at plugin root (not in `.claude-plugin/`) - Check file naming follows conventions (kebab-case, correct extensions) - Verify custom paths in manifest are correct - Restart Claude Code to reload plugin configuration **Conflicts between plugins**: - Use unique, descriptive component names - Namespace commands with plugin name if needed - Document potential conflicts in plugin README - Consider command prefixes for related functionality --- For detailed examples and advanced patterns, see files in `references/` and `examples/` 
directories. ================================================ FILE: plugins/plugin-dev/skills/plugin-structure/examples/advanced-plugin.md ================================================ # Advanced Plugin Example A complex, enterprise-grade plugin with MCP integration and advanced organization. ## Directory Structure ``` enterprise-devops/ ├── .claude-plugin/ │ └── plugin.json ├── commands/ │ ├── ci/ │ │ ├── build.md │ │ ├── test.md │ │ └── deploy.md │ ├── monitoring/ │ │ ├── status.md │ │ └── logs.md │ └── admin/ │ ├── configure.md │ └── manage.md ├── agents/ │ ├── orchestration/ │ │ ├── deployment-orchestrator.md │ │ └── rollback-manager.md │ └── specialized/ │ ├── kubernetes-expert.md │ ├── terraform-expert.md │ └── security-auditor.md ├── skills/ │ ├── kubernetes-ops/ │ │ ├── SKILL.md │ │ ├── references/ │ │ │ ├── deployment-patterns.md │ │ │ ├── troubleshooting.md │ │ │ └── security.md │ │ ├── examples/ │ │ │ ├── basic-deployment.yaml │ │ │ ├── stateful-set.yaml │ │ │ └── ingress-config.yaml │ │ └── scripts/ │ │ ├── validate-manifest.sh │ │ └── health-check.sh │ ├── terraform-iac/ │ │ ├── SKILL.md │ │ ├── references/ │ │ │ └── best-practices.md │ │ └── examples/ │ │ └── module-template/ │ └── ci-cd-pipelines/ │ ├── SKILL.md │ └── references/ │ └── pipeline-patterns.md ├── hooks/ │ ├── hooks.json │ └── scripts/ │ ├── security/ │ │ ├── scan-secrets.sh │ │ ├── validate-permissions.sh │ │ └── audit-changes.sh │ ├── quality/ │ │ ├── check-config.sh │ │ └── verify-tests.sh │ └── workflow/ │ ├── notify-team.sh │ └── update-status.sh ├── .mcp.json ├── servers/ │ ├── kubernetes-mcp/ │ │ ├── index.js │ │ ├── package.json │ │ └── lib/ │ ├── terraform-mcp/ │ │ ├── main.py │ │ └── requirements.txt │ └── github-actions-mcp/ │ ├── server.js │ └── package.json ├── lib/ │ ├── core/ │ │ ├── logger.js │ │ ├── config.js │ │ └── auth.js │ ├── integrations/ │ │ ├── slack.js │ │ ├── pagerduty.js │ │ └── datadog.js │ └── utils/ │ ├── retry.js │ └── validation.js └── config/ ├── 
environments/ │ ├── production.json │ ├── staging.json │ └── development.json └── templates/ ├── deployment.yaml └── service.yaml ``` ## File Contents ### .claude-plugin/plugin.json ```json { "name": "enterprise-devops", "version": "2.3.1", "description": "Comprehensive DevOps automation for enterprise CI/CD pipelines, infrastructure management, and monitoring", "author": { "name": "DevOps Platform Team", "email": "devops-platform@company.com", "url": "https://company.com/teams/devops" }, "homepage": "https://docs.company.com/plugins/devops", "repository": { "type": "git", "url": "https://github.com/company/devops-plugin.git" }, "license": "Apache-2.0", "keywords": [ "devops", "ci-cd", "kubernetes", "terraform", "automation", "infrastructure", "deployment", "monitoring" ], "commands": [ "./commands/ci", "./commands/monitoring", "./commands/admin" ], "agents": [ "./agents/orchestration", "./agents/specialized" ], "hooks": "./hooks/hooks.json", "mcpServers": "./.mcp.json" } ``` ### .mcp.json ```json { "mcpServers": { "kubernetes": { "command": "node", "args": ["${CLAUDE_PLUGIN_ROOT}/servers/kubernetes-mcp/index.js"], "env": { "KUBECONFIG": "${KUBECONFIG}", "K8S_NAMESPACE": "${K8S_NAMESPACE:-default}" } }, "terraform": { "command": "python", "args": ["${CLAUDE_PLUGIN_ROOT}/servers/terraform-mcp/main.py"], "env": { "TF_STATE_BUCKET": "${TF_STATE_BUCKET}", "AWS_REGION": "${AWS_REGION}" } }, "github-actions": { "command": "node", "args": ["${CLAUDE_PLUGIN_ROOT}/servers/github-actions-mcp/server.js"], "env": { "GITHUB_TOKEN": "${GITHUB_TOKEN}", "GITHUB_ORG": "${GITHUB_ORG}" } } } } ``` ### commands/ci/build.md ```markdown --- name: build description: Trigger and monitor CI build pipeline --- # Build Command Trigger CI/CD build pipeline and monitor progress in real-time. ## Process 1. **Validation**: Check prerequisites - Verify branch status - Check for uncommitted changes - Validate configuration files 2. 
**Trigger**: Start build via MCP server \`\`\`javascript // Uses github-actions MCP server const build = await tools.github_actions_trigger_workflow({ workflow: 'build.yml', ref: currentBranch }) \`\`\` 3. **Monitor**: Track build progress - Display real-time logs - Show test results as they complete - Alert on failures 4. **Report**: Summarize results - Build status - Test coverage - Performance metrics - Deploy readiness ## Integration After successful build: - Offer to deploy to staging - Suggest performance optimizations - Generate deployment checklist ``` ### agents/orchestration/deployment-orchestrator.md ```markdown --- description: Orchestrates complex multi-environment deployments with rollback capabilities and health monitoring capabilities: - Plan and execute multi-stage deployments - Coordinate service dependencies - Monitor deployment health - Execute automated rollbacks - Manage deployment approvals --- # Deployment Orchestrator Agent Specialized agent for orchestrating complex deployments across multiple environments. ## Expertise - **Deployment strategies**: Blue-green, canary, rolling updates - **Dependency management**: Service startup ordering, dependency injection - **Health monitoring**: Service health checks, metric validation - **Rollback automation**: Automatic rollback on failure detection - **Approval workflows**: Multi-stage approval processes ## Orchestration Process 1. **Planning Phase** - Analyze deployment requirements - Identify service dependencies - Generate deployment plan - Calculate rollback strategy 2. **Validation Phase** - Verify environment readiness - Check resource availability - Validate configurations - Run pre-deployment tests 3. **Execution Phase** - Deploy services in dependency order - Monitor health after each stage - Validate metrics and logs - Proceed to next stage on success 4. **Verification Phase** - Run smoke tests - Validate service integration - Check performance metrics - Confirm deployment success 5. 
**Rollback Phase** (if needed) - Detect failure conditions - Execute rollback plan - Restore previous state - Notify stakeholders ## MCP Integration Uses multiple MCP servers: - `kubernetes`: Deploy and manage containers - `terraform`: Provision infrastructure - `github-actions`: Trigger deployment pipelines ## Monitoring Integration Integrates with monitoring tools via lib: \`\`\`javascript const { DatadogClient } = require('${CLAUDE_PLUGIN_ROOT}/lib/integrations/datadog') const metrics = await DatadogClient.getMetrics(service, timeRange) \`\`\` ## Notification Integration Sends updates via Slack and PagerDuty: \`\`\`javascript const { SlackClient } = require('${CLAUDE_PLUGIN_ROOT}/lib/integrations/slack') await SlackClient.notify({ channel: '#deployments', message: 'Deployment started', metadata: deploymentPlan }) \`\`\` ``` ### skills/kubernetes-ops/SKILL.md ```markdown --- name: Kubernetes Operations description: This skill should be used when deploying to Kubernetes, managing K8s resources, troubleshooting cluster issues, configuring ingress/services, scaling deployments, or working with Kubernetes manifests. Provides comprehensive Kubernetes operational knowledge and best practices. version: 2.0.0 --- # Kubernetes Operations Comprehensive operational knowledge for managing Kubernetes clusters and workloads. 
## Overview Manage Kubernetes infrastructure effectively through: - Deployment strategies and patterns - Resource configuration and optimization - Troubleshooting and debugging - Security best practices - Performance tuning ## Core Concepts ### Resource Management **Deployments**: Use for stateless applications - Rolling updates for zero-downtime deployments - Rollback capabilities for failed deployments - Replica management for scaling **StatefulSets**: Use for stateful applications - Stable network identities - Persistent storage - Ordered deployment and scaling **DaemonSets**: Use for node-level services - Log collectors - Monitoring agents - Network plugins ### Configuration **ConfigMaps**: Store non-sensitive configuration - Environment-specific settings - Application configuration files - Feature flags **Secrets**: Store sensitive data - API keys and tokens - Database credentials - TLS certificates Use external secret management (Vault, AWS Secrets Manager) for production. ### Networking **Services**: Expose applications internally - ClusterIP for internal communication - NodePort for external access (non-production) - LoadBalancer for external access (production) **Ingress**: HTTP/HTTPS routing - Path-based routing - Host-based routing - TLS termination - Load balancing ## Deployment Strategies ### Rolling Update Default strategy, gradual replacement: \`\`\`yaml strategy: type: RollingUpdate rollingUpdate: maxSurge: 1 maxUnavailable: 0 \`\`\` **When to use**: Standard deployments, minor updates ### Recreate Stop all pods, then create new ones: \`\`\`yaml strategy: type: Recreate \`\`\` **When to use**: Stateful apps that can't run multiple versions ### Blue-Green Run two complete environments, switch traffic: 1. Deploy new version (green) 2. Test green environment 3. Switch traffic to green 4. Keep blue for quick rollback **When to use**: Critical services, need instant rollback ### Canary Gradually roll out to subset of users: 1. 
Deploy canary version (10% traffic) 2. Monitor metrics and errors 3. Increase traffic gradually 4. Complete rollout or rollback **When to use**: High-risk changes, want gradual validation ## Resource Configuration ### Resource Requests and Limits Always set for production workloads: \`\`\`yaml resources: requests: memory: "256Mi" cpu: "250m" limits: memory: "512Mi" cpu: "500m" \`\`\` **Requests**: Guaranteed resources **Limits**: Maximum allowed resources ### Health Checks Essential for reliability: \`\`\`yaml livenessProbe: httpGet: path: /health port: 8080 initialDelaySeconds: 30 periodSeconds: 10 readinessProbe: httpGet: path: /ready port: 8080 initialDelaySeconds: 5 periodSeconds: 5 \`\`\` **Liveness**: Restart unhealthy pods **Readiness**: Remove unready pods from service ## Troubleshooting ### Common Issues 1. **Pods not starting** - Check: `kubectl describe pod <pod-name>` - Look for: Image pull errors, resource constraints - Fix: Verify image name, increase resources 2. **Service not reachable** - Check: `kubectl get svc`, `kubectl get endpoints` - Look for: No endpoints, wrong selector - Fix: Verify pod labels match service selector 3. **High memory usage** - Check: `kubectl top pods` - Look for: Pods near memory limit - Fix: Increase limits, optimize application 4. 
**Frequent restarts** - Check: `kubectl get pods`, `kubectl logs <pod-name>` - Look for: Liveness probe failures, OOMKilled - Fix: Adjust health checks, increase memory ### Debugging Commands Get pod details: \`\`\`bash kubectl describe pod <pod-name> kubectl logs <pod-name> kubectl logs <pod-name> --previous # logs from crashed container \`\`\` Execute commands in pod: \`\`\`bash kubectl exec -it <pod-name> -- /bin/sh kubectl exec <pod-name> -- env \`\`\` Check resource usage: \`\`\`bash kubectl top nodes kubectl top pods \`\`\` ## Security Best Practices ### Pod Security - Run as non-root user - Use read-only root filesystem - Drop unnecessary capabilities - Use security contexts Example: \`\`\`yaml securityContext: runAsNonRoot: true runAsUser: 1000 readOnlyRootFilesystem: true capabilities: drop: - ALL \`\`\` ### Network Policies Restrict pod communication: \`\`\`yaml apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: api-allow spec: podSelector: matchLabels: app: api ingress: - from: - podSelector: matchLabels: app: frontend \`\`\` ### Secrets Management - Never commit secrets to git - Use external secret managers - Rotate secrets regularly - Limit secret access with RBAC ## Performance Optimization ### Resource Tuning 1. **Start conservative**: Set low limits initially 2. **Monitor usage**: Track actual resource consumption 3. **Adjust gradually**: Increase based on metrics 4. **Set appropriate requests**: Match typical usage 5. 
**Set safe limits**: 2x requests for headroom ### Horizontal Pod Autoscaling Automatically scale based on metrics: \`\`\`yaml apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: name: api-hpa spec: scaleTargetRef: apiVersion: apps/v1 kind: Deployment name: api minReplicas: 2 maxReplicas: 10 metrics: - type: Resource resource: name: cpu target: type: Utilization averageUtilization: 70 \`\`\` ## MCP Server Integration This skill works with the kubernetes MCP server for operations: **List pods**: \`\`\`javascript const pods = await tools.k8s_list_pods({ namespace: 'default' }) \`\`\` **Get pod logs**: \`\`\`javascript const logs = await tools.k8s_get_logs({ pod: 'api-xyz', container: 'app' }) \`\`\` **Apply manifests**: \`\`\`javascript const result = await tools.k8s_apply_manifest({ file: 'deployment.yaml' }) \`\`\` ## Detailed References For in-depth information: - **Deployment patterns**: `references/deployment-patterns.md` - **Troubleshooting guide**: `references/troubleshooting.md` - **Security hardening**: `references/security.md` ## Example Manifests For copy-paste examples: - **Basic deployment**: `examples/basic-deployment.yaml` - **StatefulSet**: `examples/stateful-set.yaml` - **Ingress config**: `examples/ingress-config.yaml` ## Validation Scripts For manifest validation: \`\`\`bash bash ${CLAUDE_PLUGIN_ROOT}/skills/kubernetes-ops/scripts/validate-manifest.sh deployment.yaml \`\`\` ``` ### hooks/hooks.json ```json { "PreToolUse": [ { "matcher": "Write|Edit", "hooks": [ { "type": "command", "command": "bash ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/security/scan-secrets.sh", "timeout": 30 } ] }, { "matcher": "Bash", "hooks": [ { "type": "prompt", "prompt": "Evaluate if this bash command is safe for production environment. Check for destructive operations, missing safeguards, and potential security issues. 
Commands should be idempotent and reversible.", "timeout": 20 } ] } ], "PostToolUse": [ { "matcher": "Bash", "hooks": [ { "type": "command", "command": "bash ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/workflow/update-status.sh", "timeout": 15 } ] } ], "Stop": [ { "matcher": ".*", "hooks": [ { "type": "command", "command": "bash ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/quality/check-config.sh", "timeout": 45 }, { "type": "command", "command": "bash ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/workflow/notify-team.sh", "timeout": 30 } ] } ], "SessionStart": [ { "matcher": ".*", "hooks": [ { "type": "command", "command": "bash ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/security/validate-permissions.sh", "timeout": 20 } ] } ] } ``` ## Key Features ### Multi-Level Organization **Commands**: Organized by function (CI, monitoring, admin) **Agents**: Separated by role (orchestration vs. specialized) **Skills**: Rich resources (references, examples, scripts) ### MCP Integration Three custom MCP servers: - **Kubernetes**: Cluster operations - **Terraform**: Infrastructure provisioning - **GitHub Actions**: CI/CD automation ### Shared Libraries Reusable code in `lib/`: - **Core**: Common utilities (logging, config, auth) - **Integrations**: External services (Slack, Datadog) - **Utils**: Helper functions (retry, validation) ### Configuration Management Environment-specific configs in `config/`: - **Environments**: Per-environment settings - **Templates**: Reusable deployment templates ### Security Automation Multiple security hooks: - Secret scanning before writes - Permission validation on session start - Configuration auditing on completion ### Monitoring Integration Built-in monitoring via lib integrations: - Datadog for metrics - PagerDuty for alerts - Slack for notifications ## Use Cases 1. **Multi-environment deployments**: Orchestrated rollouts across dev/staging/prod 2. **Infrastructure as code**: Terraform automation with state management 3. 
**CI/CD automation**: Build, test, deploy pipelines 4. **Monitoring and observability**: Integrated metrics and alerting 5. **Security enforcement**: Automated security scanning and validation 6. **Team collaboration**: Slack notifications and status updates ## When to Use This Pattern - Large-scale enterprise deployments - Multiple environment management - Complex CI/CD workflows - Integrated monitoring requirements - Security-critical infrastructure - Team collaboration needs ## Scaling Considerations - **Performance**: Separate MCP servers for parallel operations - **Organization**: Multi-level directories for scalability - **Maintainability**: Shared libraries reduce duplication - **Flexibility**: Environment configs enable customization - **Security**: Layered security hooks and validation ================================================ FILE: plugins/plugin-dev/skills/plugin-structure/examples/minimal-plugin.md ================================================ # Minimal Plugin Example A bare-bones plugin with a single command. ## Directory Structure ``` hello-world/ ├── .claude-plugin/ │ └── plugin.json └── commands/ └── hello.md ``` ## File Contents ### .claude-plugin/plugin.json ```json { "name": "hello-world" } ``` ### commands/hello.md ```markdown --- name: hello description: Prints a friendly greeting message --- # Hello Command Print a friendly greeting to the user. ## Implementation Output the following message to the user: > Hello! This is a simple command from the hello-world plugin. > > Use this as a starting point for building more complex plugins. Include the current timestamp in the greeting to show the command executed successfully. ``` ## Usage After installing the plugin: ``` $ claude > /hello Hello! This is a simple command from the hello-world plugin. Use this as a starting point for building more complex plugins. Executed at: 2025-01-15 14:30:22 UTC ``` ## Key Points 1. **Minimal manifest**: Only the required `name` field 2. 
**Single command**: One markdown file in `commands/` directory 3. **Auto-discovery**: Claude Code finds the command automatically 4. **No dependencies**: No scripts, hooks, or external resources ## When to Use This Pattern - Quick prototypes - Single-purpose utilities - Learning plugin development - Internal team tools with one specific function ## Extending This Plugin To add more functionality: 1. **Add commands**: Create more `.md` files in `commands/` 2. **Add metadata**: Update `plugin.json` with version, description, author 3. **Add agents**: Create `agents/` directory with agent definitions 4. **Add hooks**: Create `hooks/hooks.json` for event handling ================================================ FILE: plugins/plugin-dev/skills/plugin-structure/examples/standard-plugin.md ================================================ # Standard Plugin Example A well-structured plugin with commands, agents, and skills. ## Directory Structure ``` code-quality/ ├── .claude-plugin/ │ └── plugin.json ├── commands/ │ ├── lint.md │ ├── test.md │ └── review.md ├── agents/ │ ├── code-reviewer.md │ └── test-generator.md ├── skills/ │ ├── code-standards/ │ │ ├── SKILL.md │ │ └── references/ │ │ └── style-guide.md │ └── testing-patterns/ │ ├── SKILL.md │ └── examples/ │ ├── unit-test.js │ └── integration-test.js ├── hooks/ │ ├── hooks.json │ └── scripts/ │ └── validate-commit.sh └── scripts/ ├── run-linter.sh └── generate-report.py ``` ## File Contents ### .claude-plugin/plugin.json ```json { "name": "code-quality", "version": "1.0.0", "description": "Comprehensive code quality tools including linting, testing, and review automation", "author": { "name": "Quality Team", "email": "quality@example.com" }, "homepage": "https://docs.example.com/plugins/code-quality", "repository": "https://github.com/example/code-quality-plugin", "license": "MIT", "keywords": ["code-quality", "linting", "testing", "code-review", "automation"] } ``` ### commands/lint.md ```markdown --- name: lint 
description: Run linting checks on the codebase --- # Lint Command Run comprehensive linting checks on the project codebase. ## Process 1. Detect project type and installed linters 2. Run appropriate linters (ESLint, Pylint, RuboCop, etc.) 3. Collect and format results 4. Report issues with file locations and severity ## Implementation Execute the linting script: \`\`\`bash bash ${CLAUDE_PLUGIN_ROOT}/scripts/run-linter.sh \`\`\` Parse the output and present issues organized by: - Critical issues (must fix) - Warnings (should fix) - Style suggestions (optional) For each issue, show: - File path and line number - Issue description - Suggested fix (if available) ``` ### commands/test.md ```markdown --- name: test description: Run test suite with coverage reporting --- # Test Command Execute the project test suite and generate coverage reports. ## Process 1. Identify test framework (Jest, pytest, RSpec, etc.) 2. Run all tests 3. Generate coverage report 4. Identify untested code ## Output Present results in structured format: - Test summary (passed/failed/skipped) - Coverage percentage by file - Critical untested areas - Failed test details ## Integration After test completion, offer to: - Fix failing tests - Generate tests for untested code (using test-generator agent) - Update documentation based on test changes ``` ### agents/code-reviewer.md ```markdown --- description: Expert code reviewer specializing in identifying bugs, security issues, and improvement opportunities capabilities: - Analyze code for potential bugs and logic errors - Identify security vulnerabilities - Suggest performance improvements - Ensure code follows project standards - Review test coverage adequacy --- # Code Reviewer Agent Specialized agent for comprehensive code review. 
## Expertise - **Bug detection**: Logic errors, edge cases, error handling - **Security analysis**: Injection vulnerabilities, authentication issues, data exposure - **Performance**: Algorithm efficiency, resource usage, optimization opportunities - **Standards compliance**: Style guide adherence, naming conventions, documentation - **Test coverage**: Adequacy of test cases, missing scenarios ## Review Process 1. **Initial scan**: Quick pass for obvious issues 2. **Deep analysis**: Line-by-line review of changed code 3. **Context evaluation**: Check impact on related code 4. **Best practices**: Compare against project and language standards 5. **Recommendations**: Prioritized list of improvements ## Integration with Skills Automatically loads `code-standards` skill for project-specific guidelines. ## Output Format For each file reviewed: - Overall assessment - Critical issues (must fix before merge) - Important issues (should fix) - Suggestions (nice to have) - Positive feedback (what was done well) ``` ### agents/test-generator.md ```markdown --- description: Generates comprehensive test suites from code analysis capabilities: - Analyze code structure and logic flow - Generate unit tests for functions and methods - Create integration tests for modules - Design edge case and error condition tests - Suggest test fixtures and mocks --- # Test Generator Agent Specialized agent for generating comprehensive test suites. ## Expertise - **Unit testing**: Individual function/method tests - **Integration testing**: Module interaction tests - **Edge cases**: Boundary conditions, error paths - **Test organization**: Proper test structure and naming - **Mocking**: Appropriate use of mocks and stubs ## Generation Process 1. **Code analysis**: Understand function purpose and logic 2. **Path identification**: Map all execution paths 3. **Input design**: Create test inputs covering all paths 4. **Assertion design**: Define expected outputs 5. 
**Test generation**: Write tests in project's framework ## Integration with Skills Automatically loads `testing-patterns` skill for project-specific test conventions. ## Test Quality Generated tests include: - Happy path scenarios - Edge cases and boundary conditions - Error handling verification - Mock data for external dependencies - Clear test descriptions ``` ### skills/code-standards/SKILL.md ```markdown --- name: Code Standards description: This skill should be used when reviewing code, enforcing style guidelines, checking naming conventions, or ensuring code quality standards. Provides project-specific coding standards and best practices. version: 1.0.0 --- # Code Standards Comprehensive coding standards and best practices for maintaining code quality. ## Overview Enforce consistent code quality through standardized conventions for: - Code style and formatting - Naming conventions - Documentation requirements - Error handling patterns - Security practices ## Style Guidelines ### Formatting - **Indentation**: 2 spaces (JavaScript/TypeScript), 4 spaces (Python) - **Line length**: Maximum 100 characters - **Braces**: Same line for opening brace (K&R style) - **Whitespace**: Space after commas, around operators ### Naming Conventions - **Variables**: camelCase for JavaScript, snake_case for Python - **Functions**: camelCase, descriptive verb-noun pairs - **Classes**: PascalCase - **Constants**: UPPER_SNAKE_CASE - **Files**: kebab-case for modules ## Documentation Requirements ### Function Documentation Every function must include: - Purpose description - Parameter descriptions with types - Return value description with type - Example usage (for public functions) ### Module Documentation Every module must include: - Module purpose - Public API overview - Usage examples - Dependencies ## Error Handling ### Required Practices - Never swallow errors silently - Always log errors with context - Use specific error types - Provide actionable error messages - Clean up 
resources in finally blocks ### Example Pattern \`\`\`javascript async function processData(data) { try { const result = await transform(data) return result } catch (error) { logger.error('Data processing failed', { data: sanitize(data), error: error.message, stack: error.stack }) throw new DataProcessingError('Failed to process data', { cause: error }) } } \`\`\` ## Security Practices - Validate all external input - Sanitize data before output - Use parameterized queries - Never log sensitive information - Keep dependencies updated ## Detailed Guidelines For comprehensive style guides by language, see: - `references/style-guide.md` ``` ### skills/code-standards/references/style-guide.md ```markdown # Comprehensive Style Guide Detailed style guidelines for all supported languages. ## JavaScript/TypeScript ### Variable Declarations Use `const` by default, `let` when reassignment needed, never `var`: \`\`\`javascript // Good const MAX_RETRIES = 3 let currentTry = 0 // Bad var MAX_RETRIES = 3 \`\`\` ### Function Declarations Use function expressions for consistency: \`\`\`javascript // Good const calculateTotal = (items) => { return items.reduce((sum, item) => sum + item.price, 0) } // Bad (inconsistent style) function calculateTotal(items) { return items.reduce((sum, item) => sum + item.price, 0) } \`\`\` ### Async/Await Prefer async/await over promise chains: \`\`\`javascript // Good async function fetchUserData(userId) { const user = await db.getUser(userId) const orders = await db.getOrders(user.id) return { user, orders } } // Bad function fetchUserData(userId) { return db.getUser(userId) .then(user => db.getOrders(user.id) .then(orders => ({ user, orders }))) } \`\`\` ## Python ### Import Organization Order imports: standard library, third-party, local: \`\`\`python # Good import os import sys import numpy as np import pandas as pd from app.models import User from app.utils import helper # Bad - mixed order from app.models import User import numpy as np import 
os \`\`\` ### Type Hints Use type hints for all function signatures: \`\`\`python # Good def calculate_average(numbers: list[float]) -> float: return sum(numbers) / len(numbers) # Bad def calculate_average(numbers): return sum(numbers) / len(numbers) \`\`\` ## Additional Languages See language-specific guides for: - Go: `references/go-style.md` - Rust: `references/rust-style.md` - Ruby: `references/ruby-style.md` ``` ### hooks/hooks.json ```json { "PreToolUse": [ { "matcher": "Write|Edit", "hooks": [ { "type": "prompt", "prompt": "Before modifying code, verify it meets our coding standards from the code-standards skill. Check formatting, naming conventions, and documentation. If standards aren't met, suggest improvements.", "timeout": 30 } ] } ], "Stop": [ { "matcher": ".*", "hooks": [ { "type": "command", "command": "bash ${CLAUDE_PLUGIN_ROOT}/hooks/scripts/validate-commit.sh", "timeout": 45 } ] } ] } ``` ### hooks/scripts/validate-commit.sh ```bash #!/bin/bash # Validate code quality before task completion set -e # Check if there are any uncommitted changes if [[ -z $(git status -s) ]]; then echo '{"systemMessage": "No changes to validate. Task complete."}' exit 0 fi # Run linter on changed files CHANGED_FILES=$(git diff --name-only --cached | grep -E '\.(js|ts|py)$' || true) if [[ -z "$CHANGED_FILES" ]]; then echo '{"systemMessage": "No code files changed. Validation passed."}' exit 0 fi # Run appropriate linters ISSUES=0 for file in $CHANGED_FILES; do case "$file" in *.js|*.ts) if ! npx eslint "$file" --quiet; then ISSUES=$((ISSUES + 1)) fi ;; *.py) if ! python -m pylint "$file" --errors-only; then ISSUES=$((ISSUES + 1)) fi ;; esac done if [[ $ISSUES -gt 0 ]]; then echo "{\"systemMessage\": \"Found $ISSUES code quality issues. Please fix before completing.\"}" exit 1 fi echo '{"systemMessage": "Code quality checks passed. Ready to commit."}' exit 0 ``` ## Usage Examples ### Running Commands ``` $ claude > /lint Running linter checks... 
Critical Issues (2): src/api/users.js:45 - SQL injection vulnerability src/utils/helpers.js:12 - Unhandled promise rejection Warnings (5): src/components/Button.tsx:23 - Missing PropTypes ... Style Suggestions (8): src/index.js:1 - Use const instead of let ... > /test Running test suite... Test Results: ✓ 245 passed ✗ 3 failed ○ 2 skipped Coverage: 87.3% Untested Files: src/utils/cache.js - 0% coverage src/api/webhooks.js - 23% coverage Failed Tests: 1. User API › GET /users › should handle pagination Expected 200, received 500 ... ``` ### Using Agents ``` > Review the changes in src/api/users.js [code-reviewer agent selected automatically] Code Review: src/api/users.js Critical Issues: 1. Line 45: SQL injection vulnerability - Using string concatenation for SQL query - Replace with parameterized query - Priority: CRITICAL 2. Line 67: Missing error handling - Database query without try/catch - Could crash server on DB error - Priority: HIGH Suggestions: 1. Line 23: Consider caching user data - Frequent DB queries for same users - Add Redis caching layer - Priority: MEDIUM ``` ## Key Points 1. **Complete manifest**: All recommended metadata fields 2. **Multiple components**: Commands, agents, skills, hooks 3. **Rich skills**: References and examples for detailed information 4. **Automation**: Hooks enforce standards automatically 5. **Integration**: Components work together cohesively ## When to Use This Pattern - Production plugins for distribution - Team collaboration tools - Plugins requiring consistency enforcement - Complex workflows with multiple entry points ================================================ FILE: plugins/plugin-dev/skills/plugin-structure/references/component-patterns.md ================================================ # Component Organization Patterns Advanced patterns for organizing plugin components effectively. ## Component Lifecycle ### Discovery Phase When Claude Code starts: 1. 
**Scan enabled plugins**: Read `.claude-plugin/plugin.json` for each 2. **Discover components**: Look in default and custom paths 3. **Parse definitions**: Read YAML frontmatter and configurations 4. **Register components**: Make available to Claude Code 5. **Initialize**: Start MCP servers, register hooks **Timing**: Component registration happens during Claude Code initialization, not continuously. ### Activation Phase When components are used: **Commands**: User types slash command → Claude Code looks up → Executes **Agents**: Task arrives → Claude Code evaluates capabilities → Selects agent **Skills**: Task context matches description → Claude Code loads skill **Hooks**: Event occurs → Claude Code calls matching hooks **MCP Servers**: Tool call matches server capability → Forwards to server ## Command Organization Patterns ### Flat Structure Single directory with all commands: ``` commands/ ├── build.md ├── test.md ├── deploy.md ├── review.md └── docs.md ``` **When to use**: - 5-15 commands total - All commands at same abstraction level - No clear categorization **Advantages**: - Simple, easy to navigate - No configuration needed - Fast discovery ### Categorized Structure Multiple directories for different command types: ``` commands/ # Core commands ├── build.md └── test.md admin-commands/ # Administrative ├── configure.md └── manage.md workflow-commands/ # Workflow automation ├── review.md └── deploy.md ``` **Manifest configuration**: ```json { "commands": [ "./commands", "./admin-commands", "./workflow-commands" ] } ``` **When to use**: - 15+ commands - Clear functional categories - Different permission levels **Advantages**: - Organized by purpose - Easier to maintain - Can restrict access by directory ### Hierarchical Structure Nested organization for complex plugins: ``` commands/ ├── ci/ │ ├── build.md │ ├── test.md │ └── lint.md ├── deployment/ │ ├── staging.md │ └── production.md └── management/ ├── config.md └── status.md ``` **Note**: Claude Code 
doesn't support nested command discovery automatically. Use custom paths: ```json { "commands": [ "./commands/ci", "./commands/deployment", "./commands/management" ] } ``` **When to use**: - 20+ commands - Multi-level categorization - Complex workflows **Advantages**: - Maximum organization - Clear boundaries - Scalable structure ## Agent Organization Patterns ### Role-Based Organization Organize agents by their primary role: ``` agents/ ├── code-reviewer.md # Reviews code ├── test-generator.md # Generates tests ├── documentation-writer.md # Writes docs └── refactorer.md # Refactors code ``` **When to use**: - Agents have distinct, non-overlapping roles - Users invoke agents manually - Clear agent responsibilities ### Capability-Based Organization Organize by specific capabilities: ``` agents/ ├── python-expert.md # Python-specific ├── typescript-expert.md # TypeScript-specific ├── api-specialist.md # API design └── database-specialist.md # Database work ``` **When to use**: - Technology-specific agents - Domain expertise focus - Automatic agent selection ### Workflow-Based Organization Organize by workflow stage: ``` agents/ ├── planning-agent.md # Planning phase ├── implementation-agent.md # Coding phase ├── testing-agent.md # Testing phase └── deployment-agent.md # Deployment phase ``` **When to use**: - Sequential workflows - Stage-specific expertise - Pipeline automation ## Skill Organization Patterns ### Topic-Based Organization Each skill covers a specific topic: ``` skills/ ├── api-design/ │ └── SKILL.md ├── error-handling/ │ └── SKILL.md ├── testing-strategies/ │ └── SKILL.md └── performance-optimization/ └── SKILL.md ``` **When to use**: - Knowledge-based skills - Educational or reference content - Broad applicability ### Tool-Based Organization Skills for specific tools or technologies: ``` skills/ ├── docker/ │ ├── SKILL.md │ └── references/ │ └── dockerfile-best-practices.md ├── kubernetes/ │ ├── SKILL.md │ └── examples/ │ └── deployment.yaml └── 
terraform/ ├── SKILL.md └── scripts/ └── validate-config.sh ``` **When to use**: - Tool-specific expertise - Complex tool configurations - Tool best practices ### Workflow-Based Organization Skills for complete workflows: ``` skills/ ├── code-review-workflow/ │ ├── SKILL.md │ └── references/ │ ├── checklist.md │ └── standards.md ├── deployment-workflow/ │ ├── SKILL.md │ └── scripts/ │ ├── pre-deploy.sh │ └── post-deploy.sh └── testing-workflow/ ├── SKILL.md └── examples/ └── test-structure.md ``` **When to use**: - Multi-step processes - Company-specific workflows - Process automation ### Skill with Rich Resources Comprehensive skill with all resource types: ``` skills/ └── api-testing/ ├── SKILL.md # Core skill (1500 words) ├── references/ │ ├── rest-api-guide.md │ ├── graphql-guide.md │ └── authentication.md ├── examples/ │ ├── basic-test.js │ ├── authenticated-test.js │ └── integration-test.js ├── scripts/ │ ├── run-tests.sh │ └── generate-report.py └── assets/ └── test-template.json ``` **Resource usage**: - **SKILL.md**: Overview and when to use resources - **references/**: Detailed guides (loaded as needed) - **examples/**: Copy-paste code samples - **scripts/**: Executable test runners - **assets/**: Templates and configurations ## Hook Organization Patterns ### Monolithic Configuration Single hooks.json with all hooks: ``` hooks/ ├── hooks.json # All hook definitions └── scripts/ ├── validate-write.sh ├── validate-bash.sh └── load-context.sh ``` **hooks.json**: ```json { "PreToolUse": [...], "PostToolUse": [...], "Stop": [...], "SessionStart": [...] 
} ``` **When to use**: - 5-10 hooks total - Simple hook logic - Centralized configuration ### Event-Based Organization Separate files per event type: ``` hooks/ ├── hooks.json # Combines all ├── pre-tool-use.json # PreToolUse hooks ├── post-tool-use.json # PostToolUse hooks ├── stop.json # Stop hooks └── scripts/ ├── validate/ │ ├── write.sh │ └── bash.sh └── context/ └── load.sh ``` **hooks.json** (combines): ```json { "PreToolUse": ${file:./pre-tool-use.json}, "PostToolUse": ${file:./post-tool-use.json}, "Stop": ${file:./stop.json} } ``` **Note**: Use build script to combine files, Claude Code doesn't support file references. **When to use**: - 10+ hooks - Different teams managing different events - Complex hook configurations ### Purpose-Based Organization Group by functional purpose: ``` hooks/ ├── hooks.json └── scripts/ ├── security/ │ ├── validate-paths.sh │ ├── check-credentials.sh │ └── scan-malware.sh ├── quality/ │ ├── lint-code.sh │ ├── check-tests.sh │ └── verify-docs.sh └── workflow/ ├── notify-team.sh └── update-status.sh ``` **When to use**: - Many hook scripts - Clear functional boundaries - Team specialization ## Script Organization Patterns ### Flat Scripts All scripts in single directory: ``` scripts/ ├── build.sh ├── test.py ├── deploy.sh ├── validate.js └── report.py ``` **When to use**: - 5-10 scripts - All scripts related - Simple plugin ### Categorized Scripts Group by purpose: ``` scripts/ ├── build/ │ ├── compile.sh │ └── package.sh ├── test/ │ ├── run-unit.sh │ └── run-integration.sh ├── deploy/ │ ├── staging.sh │ └── production.sh └── utils/ ├── log.sh └── notify.sh ``` **When to use**: - 10+ scripts - Clear categories - Reusable utilities ### Language-Based Organization Group by programming language: ``` scripts/ ├── bash/ │ ├── build.sh │ └── deploy.sh ├── python/ │ ├── analyze.py │ └── report.py └── javascript/ ├── bundle.js └── optimize.js ``` **When to use**: - Multi-language scripts - Different runtime requirements - 
Language-specific dependencies ## Cross-Component Patterns ### Shared Resources Components sharing common resources: ``` plugin/ ├── commands/ │ ├── test.md # Uses lib/test-utils.sh │ └── deploy.md # Uses lib/deploy-utils.sh ├── agents/ │ └── tester.md # References lib/test-utils.sh ├── hooks/ │ └── scripts/ │ └── pre-test.sh # Sources lib/test-utils.sh └── lib/ ├── test-utils.sh └── deploy-utils.sh ``` **Usage in components**: ```bash #!/bin/bash source "${CLAUDE_PLUGIN_ROOT}/lib/test-utils.sh" run_tests ``` **Benefits**: - Code reuse - Consistent behavior - Easier maintenance ### Layered Architecture Separate concerns into layers: ``` plugin/ ├── commands/ # User interface layer ├── agents/ # Orchestration layer ├── skills/ # Knowledge layer └── lib/ ├── core/ # Core business logic ├── integrations/ # External services └── utils/ # Helper functions ``` **When to use**: - Large plugins (100+ files) - Multiple developers - Clear separation of concerns ### Plugin Within Plugin Nested plugin structure: ``` plugin/ ├── .claude-plugin/ │ └── plugin.json ├── core/ # Core functionality │ ├── commands/ │ └── agents/ └── extensions/ # Optional extensions ├── extension-a/ │ ├── commands/ │ └── agents/ └── extension-b/ ├── commands/ └── agents/ ``` **Manifest**: ```json { "commands": [ "./core/commands", "./extensions/extension-a/commands", "./extensions/extension-b/commands" ] } ``` **When to use**: - Modular functionality - Optional features - Plugin families ## Best Practices ### Naming 1. **Consistent naming**: Match file names to component purpose 2. **Descriptive names**: Indicate what component does 3. **Avoid abbreviations**: Use full words for clarity ### Organization 1. **Start simple**: Use flat structure, reorganize when needed 2. **Group related items**: Keep related components together 3. **Separate concerns**: Don't mix unrelated functionality ### Scalability 1. **Plan for growth**: Choose structure that scales 2. 
**Refactor early**: Reorganize before it becomes painful 3. **Document structure**: Explain organization in README ### Maintainability 1. **Consistent patterns**: Use same structure throughout 2. **Minimize nesting**: Keep directory depth manageable 3. **Use conventions**: Follow community standards ### Performance 1. **Avoid deep nesting**: Impacts discovery time 2. **Minimize custom paths**: Use defaults when possible 3. **Keep configurations small**: Large configs slow loading ================================================ FILE: plugins/plugin-dev/skills/plugin-structure/references/manifest-reference.md ================================================ # Plugin Manifest Reference Complete reference for `plugin.json` configuration. ## File Location **Required path**: `.claude-plugin/plugin.json` The manifest MUST be in the `.claude-plugin/` directory at the plugin root. Claude Code will not recognize plugins without this file in the correct location. ## Complete Field Reference ### Core Fields #### name (required) **Type**: String **Format**: kebab-case **Example**: `"test-automation-suite"` The unique identifier for the plugin. 
Used for: - Plugin identification in Claude Code - Conflict detection with other plugins - Command namespacing (optional) **Requirements**: - Must be unique across all installed plugins - Use only lowercase letters, numbers, and hyphens - No spaces or special characters - Start with a letter - End with a letter or number **Validation**: ```javascript /^[a-z][a-z0-9]*(-[a-z0-9]+)*$/ ``` **Examples**: - ✅ Good: `api-tester`, `code-review`, `git-workflow-automation` - ❌ Bad: `API Tester`, `code_review`, `-git-workflow`, `test-` #### version **Type**: String **Format**: Semantic versioning (MAJOR.MINOR.PATCH) **Example**: `"2.1.0"` **Default**: `"0.1.0"` if not specified Semantic versioning guidelines: - **MAJOR**: Incompatible API changes, breaking changes - **MINOR**: New functionality, backward-compatible - **PATCH**: Bug fixes, backward-compatible **Pre-release versions**: - `"1.0.0-alpha.1"` - Alpha release - `"1.0.0-beta.2"` - Beta release - `"1.0.0-rc.1"` - Release candidate **Examples**: - `"0.1.0"` - Initial development - `"1.0.0"` - First stable release - `"1.2.3"` - Patch update to 1.2 - `"2.0.0"` - Major version with breaking changes #### description **Type**: String **Length**: 50-200 characters recommended **Example**: `"Automates code review workflows with style checks and automated feedback"` Brief explanation of plugin purpose and functionality. **Best practices**: - Focus on what the plugin does, not how - Use active voice - Mention key features or benefits - Keep under 200 characters for marketplace display **Examples**: - ✅ "Generates comprehensive test suites from code analysis and coverage reports" - ✅ "Integrates with Jira for automatic issue tracking and sprint management" - ❌ "A plugin that helps you do testing stuff" - ❌ "This is a very long description that goes on and on about every single feature..." 
### Metadata Fields #### author **Type**: Object **Fields**: name (required), email (optional), url (optional) ```json { "author": { "name": "Jane Developer", "email": "jane@example.com", "url": "https://janedeveloper.com" } } ``` **Alternative format** (string only): ```json { "author": "Jane Developer (https://janedeveloper.com)" } ``` **Use cases**: - Credit and attribution - Contact for support or questions - Marketplace display - Community recognition #### homepage **Type**: String (URL) **Example**: `"https://docs.example.com/plugins/my-plugin"` Link to plugin documentation or landing page. **Should point to**: - Plugin documentation site - Project homepage - Detailed usage guide - Installation instructions **Not for**: - Source code (use `repository` field) - Issue tracker (include in documentation) - Personal websites (use `author.url`) #### repository **Type**: String (URL) or Object **Example**: `"https://github.com/user/plugin-name"` Source code repository location. **String format**: ```json { "repository": "https://github.com/user/plugin-name" } ``` **Object format** (detailed): ```json { "repository": { "type": "git", "url": "https://github.com/user/plugin-name.git", "directory": "packages/plugin-name" } } ``` **Use cases**: - Source code access - Issue reporting - Community contributions - Transparency and trust #### license **Type**: String **Format**: SPDX identifier **Example**: `"MIT"` Software license identifier. **Common licenses**: - `"MIT"` - Permissive, popular choice - `"Apache-2.0"` - Permissive with patent grant - `"GPL-3.0"` - Copyleft - `"BSD-3-Clause"` - Permissive - `"ISC"` - Permissive, similar to MIT - `"UNLICENSED"` - Proprietary, not open source **Full list**: https://spdx.org/licenses/ **Multiple licenses**: ```json { "license": "(MIT OR Apache-2.0)" } ``` #### keywords **Type**: Array of strings **Example**: `["testing", "automation", "ci-cd", "quality-assurance"]` Tags for plugin discovery and categorization. 
**Best practices**: - Use 5-10 keywords - Include functionality categories - Add technology names - Use common search terms - Avoid duplicating plugin name **Categories to consider**: - Functionality: `testing`, `debugging`, `documentation`, `deployment` - Technologies: `typescript`, `python`, `docker`, `aws` - Workflows: `ci-cd`, `code-review`, `git-workflow` - Domains: `web-development`, `data-science`, `devops` ### Component Path Fields #### commands **Type**: String or Array of strings **Default**: `["./commands"]` **Example**: `"./cli-commands"` Additional directories or files containing command definitions. **Single path**: ```json { "commands": "./custom-commands" } ``` **Multiple paths**: ```json { "commands": [ "./commands", "./admin-commands", "./experimental-commands" ] } ``` **Behavior**: Supplements default `commands/` directory (does not replace) **Use cases**: - Organizing commands by category - Separating stable from experimental commands - Loading commands from shared locations #### agents **Type**: String or Array of strings **Default**: `["./agents"]` **Example**: `"./specialized-agents"` Additional directories or files containing agent definitions. **Format**: Same as `commands` field **Use cases**: - Grouping agents by specialization - Separating general-purpose from task-specific agents - Loading agents from plugin dependencies #### hooks **Type**: String (path to JSON file) or Object (inline configuration) **Default**: `"./hooks/hooks.json"` Hook configuration location or inline definition. 
**File path**: ```json { "hooks": "./config/hooks.json" } ``` **Inline configuration**: ```json { "hooks": { "PreToolUse": [ { "matcher": "Write", "hooks": [ { "type": "command", "command": "bash ${CLAUDE_PLUGIN_ROOT}/scripts/validate.sh", "timeout": 30 } ] } ] } } ``` **Use cases**: - Simple plugins: Inline configuration (< 50 lines) - Complex plugins: External JSON file - Multiple hook sets: Separate files for different contexts #### mcpServers **Type**: String (path to JSON file) or Object (inline configuration) **Default**: `./.mcp.json` MCP server configuration location or inline definition. **File path**: ```json { "mcpServers": "./.mcp.json" } ``` **Inline configuration**: ```json { "mcpServers": { "github": { "command": "node", "args": ["${CLAUDE_PLUGIN_ROOT}/servers/github-mcp.js"], "env": { "GITHUB_TOKEN": "${GITHUB_TOKEN}" } } } } ``` **Use cases**: - Simple plugins: Single inline server (< 20 lines) - Complex plugins: External `.mcp.json` file - Multiple servers: Always use external file ## Path Resolution ### Relative Path Rules All paths in component fields must follow these rules: 1. **Must be relative**: No absolute paths 2. **Must start with `./`**: Indicates relative to plugin root 3. **Cannot use `../`**: No parent directory navigation 4. **Forward slashes only**: Even on Windows **Examples**: - ✅ `"./commands"` - ✅ `"./src/commands"` - ✅ `"./configs/hooks.json"` - ❌ `"/Users/name/plugin/commands"` - ❌ `"commands"` (missing `./`) - ❌ `"../shared/commands"` - ❌ `".\\commands"` (backslash) ### Resolution Order When Claude Code loads components: 1. **Default directories**: Scans standard locations first - `./commands/` - `./agents/` - `./skills/` - `./hooks/hooks.json` - `./.mcp.json` 2. **Custom paths**: Scans paths specified in manifest - Paths from `commands` field - Paths from `agents` field - Files from `hooks` and `mcpServers` fields 3. 
**Merge behavior**: Components from all locations load - No overwriting - All discovered components register - Name conflicts cause errors ## Validation ### Manifest Validation Claude Code validates the manifest on plugin load: **Syntax validation**: - Valid JSON format - No syntax errors - Correct field types **Field validation**: - `name` field present and valid format - `version` follows semantic versioning (if present) - Paths are relative with `./` prefix - URLs are valid (if present) **Component validation**: - Referenced paths exist - Hook and MCP configurations are valid - No circular dependencies ### Common Validation Errors **Invalid name format**: ```json { "name": "My Plugin" // ❌ Contains spaces } ``` Fix: Use kebab-case ```json { "name": "my-plugin" // ✅ } ``` **Absolute path**: ```json { "commands": "/Users/name/commands" // ❌ Absolute path } ``` Fix: Use relative path ```json { "commands": "./commands" // ✅ } ``` **Missing ./ prefix**: ```json { "hooks": "hooks/hooks.json" // ❌ No ./ } ``` Fix: Add ./ prefix ```json { "hooks": "./hooks/hooks.json" // ✅ } ``` **Invalid version**: ```json { "version": "1.0" // ❌ Not semantic versioning } ``` Fix: Use MAJOR.MINOR.PATCH ```json { "version": "1.0.0" // ✅ } ``` ## Minimal vs. Complete Examples ### Minimal Plugin Bare minimum for a working plugin: ```json { "name": "hello-world" } ``` Relies entirely on default directory discovery. 
### Recommended Plugin Good metadata for distribution: ```json { "name": "code-review-assistant", "version": "1.0.0", "description": "Automates code review with style checks and suggestions", "author": { "name": "Jane Developer", "email": "jane@example.com" }, "homepage": "https://docs.example.com/code-review", "repository": "https://github.com/janedev/code-review-assistant", "license": "MIT", "keywords": ["code-review", "automation", "quality", "ci-cd"] } ``` ### Complete Plugin Full configuration with all features: ```json { "name": "enterprise-devops", "version": "2.3.1", "description": "Comprehensive DevOps automation for enterprise CI/CD pipelines", "author": { "name": "DevOps Team", "email": "devops@company.com", "url": "https://company.com/devops" }, "homepage": "https://docs.company.com/plugins/devops", "repository": { "type": "git", "url": "https://github.com/company/devops-plugin.git" }, "license": "Apache-2.0", "keywords": [ "devops", "ci-cd", "automation", "kubernetes", "docker", "deployment" ], "commands": [ "./commands", "./admin-commands" ], "agents": "./specialized-agents", "hooks": "./config/hooks.json", "mcpServers": "./.mcp.json" } ``` ## Best Practices ### Metadata 1. **Always include version**: Track changes and updates 2. **Write clear descriptions**: Help users understand plugin purpose 3. **Provide contact information**: Enable user support 4. **Link to documentation**: Reduce support burden 5. **Choose appropriate license**: Match project goals ### Paths 1. **Use defaults when possible**: Minimize configuration 2. **Organize logically**: Group related components 3. **Document custom paths**: Explain why non-standard layout used 4. **Test path resolution**: Verify on multiple systems ### Maintenance 1. **Bump version on changes**: Follow semantic versioning 2. **Update keywords**: Reflect new functionality 3. **Keep description current**: Match actual capabilities 4. **Maintain changelog**: Track version history 5. 
**Update repository links**: Keep URLs current ### Distribution 1. **Complete metadata before publishing**: All fields filled 2. **Test on clean install**: Verify plugin works without dev environment 3. **Validate manifest**: Use validation tools 4. **Include README**: Document installation and usage 5. **Specify license file**: Include LICENSE file in plugin root ================================================ FILE: plugins/plugin-dev/skills/skill-development/SKILL.md ================================================ --- name: Skill Development description: This skill should be used when the user wants to "create a skill", "add a skill to plugin", "write a new skill", "improve skill description", "organize skill content", or needs guidance on skill structure, progressive disclosure, or skill development best practices for Claude Code plugins. version: 0.1.0 --- # Skill Development for Claude Code Plugins This skill provides guidance for creating effective skills for Claude Code plugins. ## About Skills Skills are modular, self-contained packages that extend Claude's capabilities by providing specialized knowledge, workflows, and tools. Think of them as "onboarding guides" for specific domains or tasks—they transform Claude from a general-purpose agent into a specialized agent equipped with procedural knowledge that no model can fully possess. ### What Skills Provide 1. Specialized workflows - Multi-step procedures for specific domains 2. Tool integrations - Instructions for working with specific file formats or APIs 3. Domain expertise - Company-specific knowledge, schemas, business logic 4. 
Bundled resources - Scripts, references, and assets for complex and repetitive tasks ### Anatomy of a Skill Every skill consists of a required SKILL.md file and optional bundled resources: ``` skill-name/ ├── SKILL.md (required) │ ├── YAML frontmatter metadata (required) │ │ ├── name: (required) │ │ └── description: (required) │ └── Markdown instructions (required) └── Bundled Resources (optional) ├── scripts/ - Executable code (Python/Bash/etc.) ├── references/ - Documentation intended to be loaded into context as needed └── assets/ - Files used in output (templates, icons, fonts, etc.) ``` #### SKILL.md (required) **Metadata Quality:** The `name` and `description` in YAML frontmatter determine when Claude will use the skill. Be specific about what the skill does and when to use it. Use the third-person (e.g. "This skill should be used when..." instead of "Use this skill when..."). #### Bundled Resources (optional) ##### Scripts (`scripts/`) Executable code (Python/Bash/etc.) for tasks that require deterministic reliability or are repeatedly rewritten. - **When to include**: When the same code is being rewritten repeatedly or deterministic reliability is needed - **Example**: `scripts/rotate_pdf.py` for PDF rotation tasks - **Benefits**: Token efficient, deterministic, may be executed without loading into context - **Note**: Scripts may still need to be read by Claude for patching or environment-specific adjustments ##### References (`references/`) Documentation and reference material intended to be loaded as needed into context to inform Claude's process and thinking. 
- **When to include**: For documentation that Claude should reference while working - **Examples**: `references/finance.md` for financial schemas, `references/mnda.md` for company NDA template, `references/policies.md` for company policies, `references/api_docs.md` for API specifications - **Use cases**: Database schemas, API documentation, domain knowledge, company policies, detailed workflow guides - **Benefits**: Keeps SKILL.md lean, loaded only when Claude determines it's needed - **Best practice**: If files are large (>10k words), include grep search patterns in SKILL.md - **Avoid duplication**: Information should live in either SKILL.md or references files, not both. Prefer references files for detailed information unless it's truly core to the skill—this keeps SKILL.md lean while making information discoverable without hogging the context window. Keep only essential procedural instructions and workflow guidance in SKILL.md; move detailed reference material, schemas, and examples to references files. ##### Assets (`assets/`) Files not intended to be loaded into context, but rather used within the output Claude produces. - **When to include**: When the skill needs files that will be used in the final output - **Examples**: `assets/logo.png` for brand assets, `assets/slides.pptx` for PowerPoint templates, `assets/frontend-template/` for HTML/React boilerplate, `assets/font.ttf` for typography - **Use cases**: Templates, images, icons, boilerplate code, fonts, sample documents that get copied or modified - **Benefits**: Separates output resources from documentation, enables Claude to use files without loading them into context ### Progressive Disclosure Design Principle Skills use a three-level loading system to manage context efficiently: 1. **Metadata (name + description)** - Always in context (~100 words) 2. **SKILL.md body** - When skill triggers (<5k words) 3. 
**Bundled resources** - As needed by Claude (Unlimited*) *Unlimited because scripts can be executed without reading into context window. ## Skill Creation Process To create a skill, follow the "Skill Creation Process" in order, skipping steps only if there is a clear reason why they are not applicable. ### Step 1: Understanding the Skill with Concrete Examples Skip this step only when the skill's usage patterns are already clearly understood. It remains valuable even when working with an existing skill. To create an effective skill, clearly understand concrete examples of how the skill will be used. This understanding can come from either direct user examples or generated examples that are validated with user feedback. For example, when building an image-editor skill, relevant questions include: - "What functionality should the image-editor skill support? Editing, rotating, anything else?" - "Can you give some examples of how this skill would be used?" - "I can imagine users asking for things like 'Remove the red-eye from this image' or 'Rotate this image'. Are there other ways you imagine this skill being used?" - "What would a user say that should trigger this skill?" To avoid overwhelming users, avoid asking too many questions in a single message. Start with the most important questions and follow up as needed for better effectiveness. Conclude this step when there is a clear sense of the functionality the skill should support. ### Step 2: Planning the Reusable Skill Contents To turn concrete examples into an effective skill, analyze each example by: 1. Considering how to execute on the example from scratch 2. Identifying what scripts, references, and assets would be helpful when executing these workflows repeatedly Example: When building a `pdf-editor` skill to handle queries like "Help me rotate this PDF," the analysis shows: 1. Rotating a PDF requires re-writing the same code each time 2. 
A `scripts/rotate_pdf.py` script would be helpful to store in the skill Example: When designing a `frontend-webapp-builder` skill for queries like "Build me a todo app" or "Build me a dashboard to track my steps," the analysis shows: 1. Writing a frontend webapp requires the same boilerplate HTML/React each time 2. An `assets/hello-world/` template containing the boilerplate HTML/React project files would be helpful to store in the skill Example: When building a `big-query` skill to handle queries like "How many users have logged in today?" the analysis shows: 1. Querying BigQuery requires re-discovering the table schemas and relationships each time 2. A `references/schema.md` file documenting the table schemas would be helpful to store in the skill **For Claude Code plugins:** When building a hooks skill, the analysis shows: 1. Developers repeatedly need to validate hooks.json and test hook scripts 2. `scripts/validate-hook-schema.sh` and `scripts/test-hook.sh` utilities would be helpful 3. `references/patterns.md` for detailed hook patterns to avoid bloating SKILL.md To establish the skill's contents, analyze each concrete example to create a list of the reusable resources to include: scripts, references, and assets. ### Step 3: Create Skill Structure For Claude Code plugins, create the skill directory structure: ```bash mkdir -p plugin-name/skills/skill-name/{references,examples,scripts} touch plugin-name/skills/skill-name/SKILL.md ``` **Note:** Unlike the generic skill-creator which uses `init_skill.py`, plugin skills are created directly in the plugin's `skills/` directory with a simpler manual structure. ### Step 4: Edit the Skill When editing the (newly-created or existing) skill, remember that the skill is being created for another instance of Claude to use. Focus on including information that would be beneficial and non-obvious to Claude. 
Consider what procedural knowledge, domain-specific details, or reusable assets would help another Claude instance execute these tasks more effectively. #### Start with Reusable Skill Contents To begin implementation, start with the reusable resources identified above: `scripts/`, `references/`, and `assets/` files. Note that this step may require user input. For example, when implementing a `brand-guidelines` skill, the user may need to provide brand assets or templates to store in `assets/`, or documentation to store in `references/`. Also, delete any example files and directories not needed for the skill. Create only the directories you actually need (references/, examples/, scripts/). #### Update SKILL.md **Writing Style:** Write the entire skill using **imperative/infinitive form** (verb-first instructions), not second person. Use objective, instructional language (e.g., "To accomplish X, do Y" rather than "You should do X" or "If you need to do X"). This maintains consistency and clarity for AI consumption. **Description (Frontmatter):** Use third-person format with specific trigger phrases: ```yaml --- name: Skill Name description: This skill should be used when the user asks to "specific phrase 1", "specific phrase 2", "specific phrase 3". Include exact phrases users would say that should trigger this skill. Be concrete and specific. version: 0.1.0 --- ``` **Good description examples:** ```yaml description: This skill should be used when the user asks to "create a hook", "add a PreToolUse hook", "validate tool use", "implement prompt-based hooks", or mentions hook events (PreToolUse, PostToolUse, Stop). ``` **Bad description examples:** ```yaml description: Use this skill when working with hooks. # Wrong person, vague description: Load when user needs hook help. # Not third person description: Provides hook guidance. # No trigger phrases ``` To complete SKILL.md body, answer the following questions: 1. What is the purpose of the skill, in a few sentences? 
2. When should the skill be used? (Include this in frontmatter description with specific triggers) 3. In practice, how should Claude use the skill? All reusable skill contents developed above should be referenced so that Claude knows how to use them. **Keep SKILL.md lean:** Target 1,500-2,000 words for the body. Move detailed content to references/: - Detailed patterns → `references/patterns.md` - Advanced techniques → `references/advanced.md` - Migration guides → `references/migration.md` - API references → `references/api-reference.md` **Reference resources in SKILL.md:** ```markdown ## Additional Resources ### Reference Files For detailed patterns and techniques, consult: - **`references/patterns.md`** - Common patterns - **`references/advanced.md`** - Advanced use cases ### Example Files Working examples in `examples/`: - **`example-script.sh`** - Working example ``` ### Step 5: Validate and Test **For plugin skills, validation is different from generic skills:** 1. **Check structure**: Skill directory in `plugin-name/skills/skill-name/` 2. **Validate SKILL.md**: Has frontmatter with name and description 3. **Check trigger phrases**: Description includes specific user queries 4. **Verify writing style**: Body uses imperative/infinitive form, not second person 5. **Test progressive disclosure**: SKILL.md is lean (~1,500-2,000 words), detailed content in references/ 6. **Check references**: All referenced files exist 7. **Validate examples**: Examples are complete and correct 8. **Test scripts**: Scripts are executable and work correctly **Use the skill-reviewer agent:** ``` Ask: "Review my skill and check if it follows best practices" ``` The skill-reviewer agent will check description quality, content organization, and progressive disclosure. ### Step 6: Iterate After testing the skill, users may request improvements. Often this happens right after using the skill, with fresh context of how the skill performed. **Iteration workflow:** 1. 
Use the skill on real tasks 2. Notice struggles or inefficiencies 3. Identify how SKILL.md or bundled resources should be updated 4. Implement changes and test again **Common improvements:** - Strengthen trigger phrases in description - Move long sections from SKILL.md to references/ - Add missing examples or scripts - Clarify ambiguous instructions - Add edge case handling ## Plugin-Specific Considerations ### Skill Location in Plugins Plugin skills live in the plugin's `skills/` directory: ``` my-plugin/ ├── .claude-plugin/ │ └── plugin.json ├── commands/ ├── agents/ └── skills/ └── my-skill/ ├── SKILL.md ├── references/ ├── examples/ └── scripts/ ``` ### Auto-Discovery Claude Code automatically discovers skills: - Scans `skills/` directory - Finds subdirectories containing `SKILL.md` - Loads skill metadata (name + description) always - Loads SKILL.md body when skill triggers - Loads references/examples when needed ### No Packaging Needed Plugin skills are distributed as part of the plugin, not as separate ZIP files. Users get skills when they install the plugin. ### Testing in Plugins Test skills by installing plugin locally: ```bash # Test with --plugin-dir cc --plugin-dir /path/to/plugin # Ask questions that should trigger the skill # Verify skill loads correctly ``` ## Examples from Plugin-Dev Study the skills in this plugin as examples of best practices: **hook-development skill:** - Excellent trigger phrases: "create a hook", "add a PreToolUse hook", etc. - Lean SKILL.md (1,651 words) - 3 references/ files for detailed content - 3 examples/ of working hooks - 3 scripts/ utilities **agent-development skill:** - Strong triggers: "create an agent", "agent frontmatter", etc. 
- Focused SKILL.md (1,438 words) - References include the AI generation prompt from Claude Code - Complete agent examples **plugin-settings skill:** - Specific triggers: "plugin settings", ".local.md files", "YAML frontmatter" - References show real implementations (multi-agent-swarm, ralph-wiggum) - Working parsing scripts Each demonstrates progressive disclosure and strong triggering. ## Progressive Disclosure in Practice ### What Goes in SKILL.md **Include (always loaded when skill triggers):** - Core concepts and overview - Essential procedures and workflows - Quick reference tables - Pointers to references/examples/scripts - Most common use cases **Keep under 3,000 words, ideally 1,500-2,000 words** ### What Goes in references/ **Move to references/ (loaded as needed):** - Detailed patterns and advanced techniques - Comprehensive API documentation - Migration guides - Edge cases and troubleshooting - Extensive examples and walkthroughs **Each reference file can be large (2,000-5,000+ words)** ### What Goes in examples/ **Working code examples:** - Complete, runnable scripts - Configuration files - Template files - Real-world usage examples **Users can copy and adapt these directly** ### What Goes in scripts/ **Utility scripts:** - Validation tools - Testing helpers - Parsing utilities - Automation scripts **Should be executable and documented** ## Writing Style Requirements ### Imperative/Infinitive Form Write using verb-first instructions, not second person: **Correct (imperative):** ``` To create a hook, define the event type. Configure the MCP server with authentication. Validate settings before use. ``` **Incorrect (second person):** ``` You should create a hook by defining the event type. You need to configure the MCP server. You must validate settings before use. 
``` ### Third-Person in Description The frontmatter description must use third person: **Correct:** ```yaml description: This skill should be used when the user asks to "create X", "configure Y"... ``` **Incorrect:** ```yaml description: Use this skill when you want to create X... description: Load this skill when user asks... ``` ### Objective, Instructional Language Focus on what to do, not who should do it: **Correct:** ``` Parse the frontmatter using sed. Extract fields with grep. Validate values before use. ``` **Incorrect:** ``` You can parse the frontmatter... Claude should extract fields... The user might validate values... ``` ## Validation Checklist Before finalizing a skill: **Structure:** - [ ] SKILL.md file exists with valid YAML frontmatter - [ ] Frontmatter has `name` and `description` fields - [ ] Markdown body is present and substantial - [ ] Referenced files actually exist **Description Quality:** - [ ] Uses third person ("This skill should be used when...") - [ ] Includes specific trigger phrases users would say - [ ] Lists concrete scenarios ("create X", "configure Y") - [ ] Not vague or generic **Content Quality:** - [ ] SKILL.md body uses imperative/infinitive form - [ ] Body is focused and lean (1,500-2,000 words ideal, <5k max) - [ ] Detailed content moved to references/ - [ ] Examples are complete and working - [ ] Scripts are executable and documented **Progressive Disclosure:** - [ ] Core concepts in SKILL.md - [ ] Detailed docs in references/ - [ ] Working code in examples/ - [ ] Utilities in scripts/ - [ ] SKILL.md references these resources **Testing:** - [ ] Skill triggers on expected user queries - [ ] Content is helpful for intended tasks - [ ] No duplicated information across files - [ ] References load when needed ## Common Mistakes to Avoid ### Mistake 1: Weak Trigger Description ❌ **Bad:** ```yaml description: Provides guidance for working with hooks. 
``` **Why bad:** Vague, no specific trigger phrases, not third person ✅ **Good:** ```yaml description: This skill should be used when the user asks to "create a hook", "add a PreToolUse hook", "validate tool use", or mentions hook events. Provides comprehensive hooks API guidance. ``` **Why good:** Third person, specific phrases, concrete scenarios ### Mistake 2: Too Much in SKILL.md ❌ **Bad:** ``` skill-name/ └── SKILL.md (8,000 words - everything in one file) ``` **Why bad:** Bloats context when skill loads, detailed content always loaded ✅ **Good:** ``` skill-name/ ├── SKILL.md (1,800 words - core essentials) └── references/ ├── patterns.md (2,500 words) └── advanced.md (3,700 words) ``` **Why good:** Progressive disclosure, detailed content loaded only when needed ### Mistake 3: Second Person Writing ❌ **Bad:** ```markdown You should start by reading the configuration file. You need to validate the input. You can use the grep tool to search. ``` **Why bad:** Second person, not imperative form ✅ **Good:** ```markdown Start by reading the configuration file. Validate the input before processing. Use the grep tool to search for patterns. 
``` **Why good:** Imperative form, direct instructions ### Mistake 4: Missing Resource References ❌ **Bad:** ```markdown # SKILL.md [Core content] [No mention of references/ or examples/] ``` **Why bad:** Claude doesn't know references exist ✅ **Good:** ```markdown # SKILL.md [Core content] ## Additional Resources ### Reference Files - **`references/patterns.md`** - Detailed patterns - **`references/advanced.md`** - Advanced techniques ### Examples - **`examples/script.sh`** - Working example ``` **Why good:** Claude knows where to find additional information ## Quick Reference ### Minimal Skill ``` skill-name/ └── SKILL.md ``` Good for: Simple knowledge, no complex resources needed ### Standard Skill (Recommended) ``` skill-name/ ├── SKILL.md ├── references/ │ └── detailed-guide.md └── examples/ └── working-example.sh ``` Good for: Most plugin skills with detailed documentation ### Complete Skill ``` skill-name/ ├── SKILL.md ├── references/ │ ├── patterns.md │ └── advanced.md ├── examples/ │ ├── example1.sh │ └── example2.json └── scripts/ └── validate.sh ``` Good for: Complex domains with validation utilities ## Best Practices Summary ✅ **DO:** - Use third-person in description ("This skill should be used when...") - Include specific trigger phrases ("create X", "configure Y") - Keep SKILL.md lean (1,500-2,000 words) - Use progressive disclosure (move details to references/) - Write in imperative/infinitive form - Reference supporting files clearly - Provide working examples - Create utility scripts for common operations - Study plugin-dev's skills as templates ❌ **DON'T:** - Use second person anywhere - Have vague trigger conditions - Put everything in SKILL.md (>3,000 words without references/) - Write in second person ("You should...") - Leave resources unreferenced - Include broken or incomplete examples - Skip validation ## Additional Resources ### Study These Skills Plugin-dev's skills demonstrate best practices: - `../hook-development/` - Progressive 
disclosure, utilities - `../agent-development/` - AI-assisted creation, references - `../mcp-integration/` - Comprehensive references - `../plugin-settings/` - Real-world examples - `../command-development/` - Clear critical concepts - `../plugin-structure/` - Good organization ### Reference Files For complete skill-creator methodology: - **`references/skill-creator-original.md`** - Full original skill-creator content ## Implementation Workflow To create a skill for your plugin: 1. **Understand use cases**: Identify concrete examples of skill usage 2. **Plan resources**: Determine what scripts/references/examples needed 3. **Create structure**: `mkdir -p skills/skill-name/{references,examples,scripts}` 4. **Write SKILL.md**: - Frontmatter with third-person description and trigger phrases - Lean body (1,500-2,000 words) in imperative form - Reference supporting files 5. **Add resources**: Create references/, examples/, scripts/ as needed 6. **Validate**: Check description, writing style, organization 7. **Test**: Verify skill loads on expected triggers 8. **Iterate**: Improve based on usage Focus on strong trigger descriptions, progressive disclosure, and imperative writing style for effective skills that load when needed and provide targeted guidance. ================================================ FILE: plugins/plugin-dev/skills/skill-development/references/skill-creator-original.md ================================================ --- name: skill-creator description: Guide for creating effective skills. This skill should be used when users want to create a new skill (or update an existing skill) that extends Claude's capabilities with specialized knowledge, workflows, or tool integrations. license: Complete terms in LICENSE.txt --- # Skill Creator This skill provides guidance for creating effective skills. ## About Skills Skills are modular, self-contained packages that extend Claude's capabilities by providing specialized knowledge, workflows, and tools. 
Think of them as "onboarding guides" for specific domains or tasks—they transform Claude from a general-purpose agent into a specialized agent equipped with procedural knowledge that no model can fully possess. ### What Skills Provide 1. Specialized workflows - Multi-step procedures for specific domains 2. Tool integrations - Instructions for working with specific file formats or APIs 3. Domain expertise - Company-specific knowledge, schemas, business logic 4. Bundled resources - Scripts, references, and assets for complex and repetitive tasks ### Anatomy of a Skill Every skill consists of a required SKILL.md file and optional bundled resources: ``` skill-name/ ├── SKILL.md (required) │ ├── YAML frontmatter metadata (required) │ │ ├── name: (required) │ │ └── description: (required) │ └── Markdown instructions (required) └── Bundled Resources (optional) ├── scripts/ - Executable code (Python/Bash/etc.) ├── references/ - Documentation intended to be loaded into context as needed └── assets/ - Files used in output (templates, icons, fonts, etc.) ``` #### SKILL.md (required) **Metadata Quality:** The `name` and `description` in YAML frontmatter determine when Claude will use the skill. Be specific about what the skill does and when to use it. Use the third-person (e.g. "This skill should be used when..." instead of "Use this skill when..."). #### Bundled Resources (optional) ##### Scripts (`scripts/`) Executable code (Python/Bash/etc.) for tasks that require deterministic reliability or are repeatedly rewritten. 
- **When to include**: When the same code is being rewritten repeatedly or deterministic reliability is needed - **Example**: `scripts/rotate_pdf.py` for PDF rotation tasks - **Benefits**: Token efficient, deterministic, may be executed without loading into context - **Note**: Scripts may still need to be read by Claude for patching or environment-specific adjustments ##### References (`references/`) Documentation and reference material intended to be loaded as needed into context to inform Claude's process and thinking. - **When to include**: For documentation that Claude should reference while working - **Examples**: `references/finance.md` for financial schemas, `references/mnda.md` for company NDA template, `references/policies.md` for company policies, `references/api_docs.md` for API specifications - **Use cases**: Database schemas, API documentation, domain knowledge, company policies, detailed workflow guides - **Benefits**: Keeps SKILL.md lean, loaded only when Claude determines it's needed - **Best practice**: If files are large (>10k words), include grep search patterns in SKILL.md - **Avoid duplication**: Information should live in either SKILL.md or references files, not both. Prefer references files for detailed information unless it's truly core to the skill—this keeps SKILL.md lean while making information discoverable without hogging the context window. Keep only essential procedural instructions and workflow guidance in SKILL.md; move detailed reference material, schemas, and examples to references files. ##### Assets (`assets/`) Files not intended to be loaded into context, but rather used within the output Claude produces. 
- **When to include**: When the skill needs files that will be used in the final output - **Examples**: `assets/logo.png` for brand assets, `assets/slides.pptx` for PowerPoint templates, `assets/frontend-template/` for HTML/React boilerplate, `assets/font.ttf` for typography - **Use cases**: Templates, images, icons, boilerplate code, fonts, sample documents that get copied or modified - **Benefits**: Separates output resources from documentation, enables Claude to use files without loading them into context ### Progressive Disclosure Design Principle Skills use a three-level loading system to manage context efficiently: 1. **Metadata (name + description)** - Always in context (~100 words) 2. **SKILL.md body** - When skill triggers (<5k words) 3. **Bundled resources** - As needed by Claude (Unlimited*) *Unlimited because scripts can be executed without reading into context window. ## Skill Creation Process To create a skill, follow the "Skill Creation Process" in order, skipping steps only if there is a clear reason why they are not applicable. ### Step 1: Understanding the Skill with Concrete Examples Skip this step only when the skill's usage patterns are already clearly understood. It remains valuable even when working with an existing skill. To create an effective skill, clearly understand concrete examples of how the skill will be used. This understanding can come from either direct user examples or generated examples that are validated with user feedback. For example, when building an image-editor skill, relevant questions include: - "What functionality should the image-editor skill support? Editing, rotating, anything else?" - "Can you give some examples of how this skill would be used?" - "I can imagine users asking for things like 'Remove the red-eye from this image' or 'Rotate this image'. Are there other ways you imagine this skill being used?" - "What would a user say that should trigger this skill?" 
To avoid overwhelming users, avoid asking too many questions in a single message. Start with the most important questions and follow up as needed for better effectiveness. Conclude this step when there is a clear sense of the functionality the skill should support. ### Step 2: Planning the Reusable Skill Contents To turn concrete examples into an effective skill, analyze each example by: 1. Considering how to execute on the example from scratch 2. Identifying what scripts, references, and assets would be helpful when executing these workflows repeatedly Example: When building a `pdf-editor` skill to handle queries like "Help me rotate this PDF," the analysis shows: 1. Rotating a PDF requires re-writing the same code each time 2. A `scripts/rotate_pdf.py` script would be helpful to store in the skill Example: When designing a `frontend-webapp-builder` skill for queries like "Build me a todo app" or "Build me a dashboard to track my steps," the analysis shows: 1. Writing a frontend webapp requires the same boilerplate HTML/React each time 2. An `assets/hello-world/` template containing the boilerplate HTML/React project files would be helpful to store in the skill Example: When building a `big-query` skill to handle queries like "How many users have logged in today?" the analysis shows: 1. Querying BigQuery requires re-discovering the table schemas and relationships each time 2. A `references/schema.md` file documenting the table schemas would be helpful to store in the skill To establish the skill's contents, analyze each concrete example to create a list of the reusable resources to include: scripts, references, and assets. ### Step 3: Initializing the Skill At this point, it is time to actually create the skill. Skip this step only if the skill being developed already exists, and iteration or packaging is needed. In this case, continue to the next step. When creating a new skill from scratch, always run the `init_skill.py` script. 
The script conveniently generates a new template skill directory that automatically includes everything a skill requires, making the skill creation process much more efficient and reliable. Usage: ```bash scripts/init_skill.py <skill-name> --path <output-directory> ``` The script: - Creates the skill directory at the specified path - Generates a SKILL.md template with proper frontmatter and TODO placeholders - Creates example resource directories: `scripts/`, `references/`, and `assets/` - Adds example files in each directory that can be customized or deleted After initialization, customize or remove the generated SKILL.md and example files as needed. ### Step 4: Edit the Skill When editing the (newly-generated or existing) skill, remember that the skill is being created for another instance of Claude to use. Focus on including information that would be beneficial and non-obvious to Claude. Consider what procedural knowledge, domain-specific details, or reusable assets would help another Claude instance execute these tasks more effectively. #### Start with Reusable Skill Contents To begin implementation, start with the reusable resources identified above: `scripts/`, `references/`, and `assets/` files. Note that this step may require user input. For example, when implementing a `brand-guidelines` skill, the user may need to provide brand assets or templates to store in `assets/`, or documentation to store in `references/`. Also, delete any example files and directories not needed for the skill. The initialization script creates example files in `scripts/`, `references/`, and `assets/` to demonstrate structure, but most skills won't need all of them. #### Update SKILL.md **Writing Style:** Write the entire skill using **imperative/infinitive form** (verb-first instructions), not second person. Use objective, instructional language (e.g., "To accomplish X, do Y" rather than "You should do X" or "If you need to do X"). This maintains consistency and clarity for AI consumption. 
To complete SKILL.md, answer the following questions: 1. What is the purpose of the skill, in a few sentences? 2. When should the skill be used? 3. In practice, how should Claude use the skill? All reusable skill contents developed above should be referenced so that Claude knows how to use them. ### Step 5: Packaging a Skill Once the skill is ready, it should be packaged into a distributable zip file that gets shared with the user. The packaging process automatically validates the skill first to ensure it meets all requirements: ```bash scripts/package_skill.py <path-to-skill> ``` Optional output directory specification: ```bash scripts/package_skill.py <path-to-skill> ./dist ``` The packaging script will: 1. **Validate** the skill automatically, checking: - YAML frontmatter format and required fields - Skill naming conventions and directory structure - Description completeness and quality - File organization and resource references 2. **Package** the skill if validation passes, creating a zip file named after the skill (e.g., `my-skill.zip`) that includes all files and maintains the proper directory structure for distribution. If validation fails, the script will report the errors and exit without creating a package. Fix any validation errors and run the packaging command again. ### Step 6: Iterate After testing the skill, users may request improvements. Often this happens right after using the skill, with fresh context of how the skill performed. **Iteration workflow:** 1. Use the skill on real tasks 2. Notice struggles or inefficiencies 3. Identify how SKILL.md or bundled resources should be updated 4. 
Implement changes and test again ================================================ FILE: plugins/pr-review-toolkit/.claude-plugin/plugin.json ================================================ { "name": "pr-review-toolkit", "version": "1.0.0", "description": "Comprehensive PR review agents specializing in comments, tests, error handling, type design, code quality, and code simplification", "author": { "name": "Daisy", "email": "daisy@anthropic.com" } } ================================================ FILE: plugins/pr-review-toolkit/README.md ================================================ # PR Review Toolkit A comprehensive collection of specialized agents for thorough pull request review, covering code comments, test coverage, error handling, type design, code quality, and code simplification. ## Overview This plugin bundles 6 expert review agents that each focus on a specific aspect of code quality. Use them individually for targeted reviews or together for comprehensive PR analysis. ## Agents ### 1. comment-analyzer **Focus**: Code comment accuracy and maintainability **Analyzes:** - Comment accuracy vs actual code - Documentation completeness - Comment rot and technical debt - Misleading or outdated comments **When to use:** - After adding documentation - Before finalizing PRs with comment changes - When reviewing existing comments **Triggers:** ``` "Check if the comments are accurate" "Review the documentation I added" "Analyze comments for technical debt" ``` ### 2. pr-test-analyzer **Focus**: Test coverage quality and completeness **Analyzes:** - Behavioral vs line coverage - Critical gaps in test coverage - Test quality and resilience - Edge cases and error conditions **When to use:** - After creating a PR - When adding new functionality - To verify test thoroughness **Triggers:** ``` "Check if the tests are thorough" "Review test coverage for this PR" "Are there any critical test gaps?" ``` ### 3. 
silent-failure-hunter **Focus**: Error handling and silent failures **Analyzes:** - Silent failures in catch blocks - Inadequate error handling - Inappropriate fallback behavior - Missing error logging **When to use:** - After implementing error handling - When reviewing try/catch blocks - Before finalizing PRs with error handling **Triggers:** ``` "Review the error handling" "Check for silent failures" "Analyze catch blocks in this PR" ``` ### 4. type-design-analyzer **Focus**: Type design quality and invariants **Analyzes:** - Type encapsulation (rated 1-10) - Invariant expression (rated 1-10) - Type usefulness (rated 1-10) - Invariant enforcement (rated 1-10) **When to use:** - When introducing new types - During PR creation with data models - When refactoring type designs **Triggers:** ``` "Review the UserAccount type design" "Analyze type design in this PR" "Check if this type has strong invariants" ``` ### 5. code-reviewer **Focus**: General code review for project guidelines **Analyzes:** - CLAUDE.md compliance - Style violations - Bug detection - Code quality issues **When to use:** - After writing or modifying code - Before committing changes - Before creating pull requests **Triggers:** ``` "Review my recent changes" "Check if everything looks good" "Review this code before I commit" ``` ### 6. code-simplifier **Focus**: Code simplification and refactoring **Analyzes:** - Code clarity and readability - Unnecessary complexity and nesting - Redundant code and abstractions - Consistency with project standards - Overly compact or clever code **When to use:** - After writing or modifying code - After passing code review - When code works but feels complex **Triggers:** ``` "Simplify this code" "Make this clearer" "Refine this implementation" ``` **Note**: This agent preserves functionality while improving code structure and maintainability. 
## Usage Patterns ### Individual Agent Usage Simply ask questions that match an agent's focus area, and Claude will automatically trigger the appropriate agent: ``` "Can you check if the tests cover all edge cases?" → Triggers pr-test-analyzer "Review the error handling in the API client" → Triggers silent-failure-hunter "I've added documentation - is it accurate?" → Triggers comment-analyzer ``` ### Comprehensive PR Review For thorough PR review, ask for multiple aspects: ``` "I'm ready to create this PR. Please: 1. Review test coverage 2. Check for silent failures 3. Verify code comments are accurate 4. Review any new types 5. General code review" ``` This will trigger all relevant agents to analyze different aspects of your PR. ### Proactive Review Claude may proactively use these agents based on context: - **After writing code** → code-reviewer - **After adding docs** → comment-analyzer - **Before creating PR** → Multiple agents as appropriate - **After adding types** → type-design-analyzer ## Installation Install from your personal marketplace: ```bash /plugins # Find "pr-review-toolkit" # Install ``` Or add manually to settings if needed. 
## Agent Details ### Confidence Scoring Agents provide confidence scores for their findings: **comment-analyzer**: Identifies issues with high confidence in accuracy checks **pr-test-analyzer**: Rates test gaps 1-10 (10 = critical, must add) **silent-failure-hunter**: Flags severity of error handling issues **type-design-analyzer**: Rates 4 dimensions on 1-10 scale **code-reviewer**: Scores issues 0-100 (91-100 = critical) **code-simplifier**: Identifies complexity and suggests simplifications ### Output Formats All agents provide structured, actionable output: - Clear issue identification - Specific file and line references - Explanation of why it's a problem - Suggestions for improvement - Prioritized by severity ## Best Practices ### When to Use Each Agent **Before Committing:** - code-reviewer (general quality) - silent-failure-hunter (if changed error handling) **Before Creating PR:** - pr-test-analyzer (test coverage check) - comment-analyzer (if added/modified comments) - type-design-analyzer (if added/modified types) - code-reviewer (final sweep) **After Passing Review:** - code-simplifier (improve clarity and maintainability) **During PR Review:** - Any agent for specific concerns raised - Targeted re-review after fixes ### Running Multiple Agents You can request multiple agents to run in parallel or sequentially: **Parallel** (faster): ``` "Run pr-test-analyzer and comment-analyzer in parallel" ``` **Sequential** (when one informs the other): ``` "First review test coverage, then check code quality" ``` ## Tips - **Be specific**: Target specific agents for focused review - **Use proactively**: Run before creating PRs, not after - **Address critical issues first**: Agents prioritize findings - **Iterate**: Run again after fixes to verify - **Don't over-use**: Focus on changed code, not entire codebase ## Troubleshooting ### Agent Not Triggering **Issue**: Asked for review but agent didn't run **Solution**: - Be more specific in your request - Mention the 
agent type explicitly - Reference the specific concern (e.g., "test coverage") ### Agent Analyzing Wrong Files **Issue**: Agent reviewing too much or wrong files **Solution**: - Specify which files to focus on - Reference the PR number or branch - Mention "recent changes" or "git diff" ## Integration with Workflow This plugin works great with: - **build-validator**: Run build/tests before review - **Project-specific agents**: Combine with your custom agents **Recommended workflow:** 1. Write code → **code-reviewer** 2. Fix issues → **silent-failure-hunter** (if error handling) 3. Add tests → **pr-test-analyzer** 4. Document → **comment-analyzer** 5. Review passes → **code-simplifier** (polish) 6. Create PR ## Contributing Found issues or have suggestions? These agents are maintained in: - User agents: `~/.claude/agents/` - Project agents: `.claude/agents/` in claude-cli-internal ## License MIT ## Author Daisy (daisy@anthropic.com) --- **Quick Start**: Just ask for review and the right agent will trigger automatically! ================================================ FILE: plugins/pr-review-toolkit/agents/code-reviewer.md ================================================ --- name: code-reviewer description: Use this agent when you need to review code for adherence to project guidelines, style guides, and best practices. This agent should be used proactively after writing or modifying code, especially before committing changes or creating pull requests. It will check for style violations, potential issues, and ensure code follows the established patterns in CLAUDE.md. Also the agent needs to know which files to focus on for the review. In most cases this will be recently completed work which is unstaged in git (can be retrieved by doing a git diff). However, there can be cases where this is different; make sure to specify this as the agent input when calling the agent. 
\n\nExamples:\n\nContext: The user has just implemented a new feature with several TypeScript files.\nuser: "I've added the new authentication feature. Can you check if everything looks good?"\nassistant: "I'll use the Task tool to launch the code-reviewer agent to review your recent changes."\n\nSince the user has completed a feature and wants validation, use the code-reviewer agent to ensure the code meets project standards.\n\n\n\nContext: The assistant has just written a new utility function.\nuser: "Please create a function to validate email addresses"\nassistant: "Here's the email validation function:"\n\nassistant: "Now I'll use the Task tool to launch the code-reviewer agent to review this implementation."\n\nProactively use the code-reviewer agent after writing new code to catch issues early.\n\n\n\nContext: The user is about to create a PR.\nuser: "I think I'm ready to create a PR for this feature"\nassistant: "Before creating the PR, I'll use the Task tool to launch the code-reviewer agent to ensure all code meets our standards."\n\nProactively review code before PR creation to avoid review comments and iterations.\n\n model: opus color: green --- You are an expert code reviewer specializing in modern software development across multiple languages and frameworks. Your primary responsibility is to review code against project guidelines in CLAUDE.md with high precision to minimize false positives. ## Review Scope By default, review unstaged changes from `git diff`. The user may specify different files or scope to review. ## Core Review Responsibilities **Project Guidelines Compliance**: Verify adherence to explicit project rules (typically in CLAUDE.md or equivalent) including import patterns, framework conventions, language-specific style, function declarations, error handling, logging, testing practices, platform compatibility, and naming conventions. 
**Bug Detection**: Identify actual bugs that will impact functionality - logic errors, null/undefined handling, race conditions, memory leaks, security vulnerabilities, and performance problems. **Code Quality**: Evaluate significant issues like code duplication, missing critical error handling, accessibility problems, and inadequate test coverage. ## Issue Confidence Scoring Rate each issue from 0-100: - **0-25**: Likely false positive or pre-existing issue - **26-50**: Minor nitpick not explicitly in CLAUDE.md - **51-75**: Valid but low-impact issue - **76-90**: Important issue requiring attention - **91-100**: Critical bug or explicit CLAUDE.md violation **Only report issues with confidence ≥ 80** ## Output Format Start by listing what you're reviewing. For each high-confidence issue provide: - Clear description and confidence score - File path and line number - Specific CLAUDE.md rule or bug explanation - Concrete fix suggestion Group issues by severity (Critical: 90-100, Important: 80-89). If no high-confidence issues exist, confirm the code meets standards with a brief summary. Be thorough but filter aggressively - quality over quantity. Focus on issues that truly matter. ================================================ FILE: plugins/pr-review-toolkit/agents/code-simplifier.md ================================================ --- name: code-simplifier description: Use this agent when code has been written or modified and needs to be simplified for clarity, consistency, and maintainability while preserving all functionality. This agent should be triggered automatically after completing a coding task or writing a logical chunk of code. It simplifies code by following project best practices while retaining all functionality. The agent focuses only on recently modified code unless instructed otherwise.\n\nExamples:\n\n Context: The assistant has just implemented a new feature that adds user authentication to an API endpoint. 
user: "Please add authentication to the /api/users endpoint" assistant: "I've implemented the authentication for the /api/users endpoint. Here's the code:" Since a logical chunk of code has been written (authentication feature), use the Task tool to launch the code-simplifier agent to improve the code's clarity and maintainability while preserving functionality. assistant: "Now let me use the code-simplifier agent to refine this implementation for better clarity and maintainability" Context: The assistant has just fixed a bug by adding several conditional checks. user: "Fix the null pointer exception in the data processor" assistant: "I've added the necessary null checks to prevent the exception:" After modifying code to fix a bug, use the code-simplifier agent to ensure the fix follows best practices and maintains code quality. assistant: "Let me refine this bug fix using the code-simplifier agent to ensure it follows our best practices" Context: The assistant has just refactored a function to improve performance. user: "Optimize the data sorting algorithm for better performance" assistant: "I've optimized the sorting algorithm. Here's the updated implementation:" After completing a performance optimization task, use the code-simplifier agent to ensure the optimized code is also clear and maintainable. assistant: "Now I'll use the code-simplifier agent to ensure the optimized code is also clear and follows our coding standards" model: opus --- You are an expert code simplification specialist focused on enhancing code clarity, consistency, and maintainability while preserving exact functionality. Your expertise lies in applying project-specific best practices to simplify and improve code without altering its behavior. You prioritize readable, explicit code over overly compact solutions. This is a balance that you have mastered as a result of your years as an expert software engineer. You will analyze recently modified code and apply refinements that: 1. 
**Preserve Functionality**: Never change what the code does - only how it does it. All original features, outputs, and behaviors must remain intact. 2. **Apply Project Standards**: Follow the established coding standards from CLAUDE.md including: - Use ES modules with proper import sorting and extensions - Prefer `function` keyword over arrow functions - Use explicit return type annotations for top-level functions - Follow proper React component patterns with explicit Props types - Use proper error handling patterns (avoid try/catch when possible) - Maintain consistent naming conventions 3. **Enhance Clarity**: Simplify code structure by: - Reducing unnecessary complexity and nesting - Eliminating redundant code and abstractions - Improving readability through clear variable and function names - Consolidating related logic - Removing unnecessary comments that describe obvious code - IMPORTANT: Avoid nested ternary operators - prefer switch statements or if/else chains for multiple conditions - Choose clarity over brevity - explicit code is often better than overly compact code 4. **Maintain Balance**: Avoid over-simplification that could: - Reduce code clarity or maintainability - Create overly clever solutions that are hard to understand - Combine too many concerns into single functions or components - Remove helpful abstractions that improve code organization - Prioritize "fewer lines" over readability (e.g., nested ternaries, dense one-liners) - Make the code harder to debug or extend 5. **Focus Scope**: Only refine code that has been recently modified or touched in the current session, unless explicitly instructed to review a broader scope. Your refinement process: 1. Identify the recently modified code sections 2. Analyze for opportunities to improve elegance and consistency 3. Apply project-specific best practices and coding standards 4. Ensure all functionality remains unchanged 5. Verify the refined code is simpler and more maintainable 6. 
Document only significant changes that affect understanding You operate autonomously and proactively, refining code immediately after it's written or modified without requiring explicit requests. Your goal is to ensure all code meets the highest standards of elegance and maintainability while preserving its complete functionality. ================================================ FILE: plugins/pr-review-toolkit/agents/comment-analyzer.md ================================================ --- name: comment-analyzer description: Use this agent when you need to analyze code comments for accuracy, completeness, and long-term maintainability. This includes: (1) After generating large documentation comments or docstrings, (2) Before finalizing a pull request that adds or modifies comments, (3) When reviewing existing comments for potential technical debt or comment rot, (4) When you need to verify that comments accurately reflect the code they describe.\n\n\nContext: The user is working on a pull request that adds several documentation comments to functions.\nuser: "I've added documentation to these functions. Can you check if the comments are accurate?"\nassistant: "I'll use the comment-analyzer agent to thoroughly review all the comments in this pull request for accuracy and completeness."\n\nSince the user has added documentation comments and wants them checked, use the comment-analyzer agent to verify their accuracy against the actual code.\n\n\n\n\nContext: The user just asked to generate comprehensive documentation for a complex function.\nuser: "Add detailed documentation for this authentication handler function"\nassistant: "I've added the documentation. 
Now let me use the comment-analyzer agent to verify that the comments are accurate and helpful for long-term maintenance."\n\nAfter generating large documentation comments, proactively use the comment-analyzer to ensure quality.\n\n\n\n\nContext: The user is preparing to create a pull request with multiple code changes and comments.\nuser: "I think we're ready to create the PR now"\nassistant: "Before creating the pull request, let me use the comment-analyzer agent to review all the comments we've added or modified to ensure they're accurate and won't create technical debt."\n\nBefore finalizing a PR, use the comment-analyzer to review all comment changes.\n\n model: inherit color: green --- You are a meticulous code comment analyzer with deep expertise in technical documentation and long-term code maintainability. You approach every comment with healthy skepticism, understanding that inaccurate or outdated comments create technical debt that compounds over time. Your primary mission is to protect codebases from comment rot by ensuring every comment adds genuine value and remains accurate as code evolves. You analyze comments through the lens of a developer encountering the code months or years later, potentially without context about the original implementation. When analyzing comments, you will: 1. **Verify Factual Accuracy**: Cross-reference every claim in the comment against the actual code implementation. Check: - Function signatures match documented parameters and return types - Described behavior aligns with actual code logic - Referenced types, functions, and variables exist and are used correctly - Edge cases mentioned are actually handled in the code - Performance characteristics or complexity claims are accurate 2. 
**Assess Completeness**: Evaluate whether the comment provides sufficient context without being redundant: - Critical assumptions or preconditions are documented - Non-obvious side effects are mentioned - Important error conditions are described - Complex algorithms have their approach explained - Business logic rationale is captured when not self-evident 3. **Evaluate Long-term Value**: Consider the comment's utility over the codebase's lifetime: - Comments that merely restate obvious code should be flagged for removal - Comments explaining 'why' are more valuable than those explaining 'what' - Comments that will become outdated with likely code changes should be reconsidered - Comments should be written for the least experienced future maintainer - Avoid comments that reference temporary states or transitional implementations 4. **Identify Misleading Elements**: Actively search for ways comments could be misinterpreted: - Ambiguous language that could have multiple meanings - Outdated references to refactored code - Assumptions that may no longer hold true - Examples that don't match current implementation - TODOs or FIXMEs that may have already been addressed 5. 
**Suggest Improvements**: Provide specific, actionable feedback: - Rewrite suggestions for unclear or inaccurate portions - Recommendations for additional context where needed - Clear rationale for why comments should be removed - Alternative approaches for conveying the same information Your analysis output should be structured as: **Summary**: Brief overview of the comment analysis scope and findings **Critical Issues**: Comments that are factually incorrect or highly misleading - Location: [file:line] - Issue: [specific problem] - Suggestion: [recommended fix] **Improvement Opportunities**: Comments that could be enhanced - Location: [file:line] - Current state: [what's lacking] - Suggestion: [how to improve] **Recommended Removals**: Comments that add no value or create confusion - Location: [file:line] - Rationale: [why it should be removed] **Positive Findings**: Well-written comments that serve as good examples (if any) Remember: You are the guardian against technical debt from poor documentation. Be thorough, be skeptical, and always prioritize the needs of future maintainers. Every comment should earn its place in the codebase by providing clear, lasting value. IMPORTANT: You analyze and provide feedback only. Do not modify code or comments directly. Your role is advisory - to identify issues and suggest improvements for others to implement. ================================================ FILE: plugins/pr-review-toolkit/agents/pr-test-analyzer.md ================================================ --- name: pr-test-analyzer description: Use this agent when you need to review a pull request for test coverage quality and completeness. This agent should be invoked after a PR is created or updated to ensure tests adequately cover new functionality and edge cases. Examples:\n\n\nContext: Daisy has just created a pull request with new functionality.\nuser: "I've created the PR. 
Can you check if the tests are thorough?"\nassistant: "I'll use the pr-test-analyzer agent to review the test coverage and identify any critical gaps."\n\nSince Daisy is asking about test thoroughness in a PR, use the Task tool to launch the pr-test-analyzer agent.\n\n\n\n\nContext: A pull request has been updated with new code changes.\nuser: "The PR is ready for review - I added the new validation logic we discussed"\nassistant: "Let me analyze the PR to ensure the tests adequately cover the new validation logic and edge cases."\n\nThe PR has new functionality that needs test coverage analysis, so use the pr-test-analyzer agent.\n\n\n\n\nContext: Reviewing PR feedback before marking as ready.\nuser: "Before I mark this PR as ready, can you double-check the test coverage?"\nassistant: "I'll use the pr-test-analyzer agent to thoroughly review the test coverage and identify any critical gaps before you mark it ready."\n\nDaisy wants a final test coverage check before marking PR ready, use the pr-test-analyzer agent.\n\n model: inherit color: cyan --- You are an expert test coverage analyst specializing in pull request review. Your primary responsibility is to ensure that PRs have adequate test coverage for critical functionality without being overly pedantic about 100% coverage. **Your Core Responsibilities:** 1. **Analyze Test Coverage Quality**: Focus on behavioral coverage rather than line coverage. Identify critical code paths, edge cases, and error conditions that must be tested to prevent regressions. 2. **Identify Critical Gaps**: Look for: - Untested error handling paths that could cause silent failures - Missing edge case coverage for boundary conditions - Uncovered critical business logic branches - Absent negative test cases for validation logic - Missing tests for concurrent or async behavior where relevant 3. 
**Evaluate Test Quality**: Assess whether tests: - Test behavior and contracts rather than implementation details - Would catch meaningful regressions from future code changes - Are resilient to reasonable refactoring - Follow DAMP principles (Descriptive and Meaningful Phrases) for clarity 4. **Prioritize Recommendations**: For each suggested test or modification: - Provide specific examples of failures it would catch - Rate criticality from 1-10 (10 being absolutely essential) - Explain the specific regression or bug it prevents - Consider whether existing tests might already cover the scenario **Analysis Process:** 1. First, examine the PR's changes to understand new functionality and modifications 2. Review the accompanying tests to map coverage to functionality 3. Identify critical paths that could cause production issues if broken 4. Check for tests that are too tightly coupled to implementation 5. Look for missing negative cases and error scenarios 6. Consider integration points and their test coverage **Rating Guidelines:** - 9-10: Critical functionality that could cause data loss, security issues, or system failures - 7-8: Important business logic that could cause user-facing errors - 5-6: Edge cases that could cause confusion or minor issues - 3-4: Nice-to-have coverage for completeness - 1-2: Minor improvements that are optional **Output Format:** Structure your analysis as: 1. **Summary**: Brief overview of test coverage quality 2. **Critical Gaps** (if any): Tests rated 8-10 that must be added 3. **Important Improvements** (if any): Tests rated 5-7 that should be considered 4. **Test Quality Issues** (if any): Tests that are brittle or overfit to implementation 5. 
**Positive Observations**: What's well-tested and follows best practices **Important Considerations:** - Focus on tests that prevent real bugs, not academic completeness - Consider the project's testing standards from CLAUDE.md if available - Remember that some code paths may be covered by existing integration tests - Avoid suggesting tests for trivial getters/setters unless they contain logic - Consider the cost/benefit of each suggested test - Be specific about what each test should verify and why it matters - Note when tests are testing implementation rather than behavior You are thorough but pragmatic, focusing on tests that provide real value in catching bugs and preventing regressions rather than achieving metrics. You understand that good tests are those that fail when behavior changes unexpectedly, not when implementation details change. ================================================ FILE: plugins/pr-review-toolkit/agents/silent-failure-hunter.md ================================================ --- name: silent-failure-hunter description: Use this agent when reviewing code changes in a pull request to identify silent failures, inadequate error handling, and inappropriate fallback behavior. This agent should be invoked proactively after completing a logical chunk of work that involves error handling, catch blocks, fallback logic, or any code that could potentially suppress errors. Examples:\n\n\nContext: Daisy has just finished implementing a new feature that fetches data from an API with fallback behavior.\nDaisy: "I've added error handling to the API client. 
Can you review it?"\nAssistant: "Let me use the silent-failure-hunter agent to thoroughly examine the error handling in your changes."\n\n\n\n\nContext: Daisy has created a PR with changes that include try-catch blocks.\nDaisy: "Please review PR #1234"\nAssistant: "I'll use the silent-failure-hunter agent to check for any silent failures or inadequate error handling in this PR."\n\n\n\n\nContext: Daisy has just refactored error handling code.\nDaisy: "I've updated the error handling in the authentication module"\nAssistant: "Let me proactively use the silent-failure-hunter agent to ensure the error handling changes don't introduce silent failures."\n\n model: inherit color: yellow --- You are an elite error handling auditor with zero tolerance for silent failures and inadequate error handling. Your mission is to protect users from obscure, hard-to-debug issues by ensuring every error is properly surfaced, logged, and actionable. ## Core Principles You operate under these non-negotiable rules: 1. **Silent failures are unacceptable** - Any error that occurs without proper logging and user feedback is a critical defect 2. **Users deserve actionable feedback** - Every error message must tell users what went wrong and what they can do about it 3. **Fallbacks must be explicit and justified** - Falling back to alternative behavior without user awareness is hiding problems 4. **Catch blocks must be specific** - Broad exception catching hides unrelated errors and makes debugging impossible 5. **Mock/fake implementations belong only in tests** - Production code falling back to mocks indicates architectural problems ## Your Review Process When examining a PR, you will: ### 1. Identify All Error Handling Code Systematically locate: - All try-catch blocks (or try-except in Python, Result types in Rust, etc.) 
- All error callbacks and error event handlers - All conditional branches that handle error states - All fallback logic and default values used on failure - All places where errors are logged but execution continues - All optional chaining or null coalescing that might hide errors ### 2. Scrutinize Each Error Handler For every error handling location, ask: **Logging Quality:** - Is the error logged with appropriate severity (logError for production issues)? - Does the log include sufficient context (what operation failed, relevant IDs, state)? - Is there an error ID from constants/errorIds.ts for Sentry tracking? - Would this log help someone debug the issue 6 months from now? **User Feedback:** - Does the user receive clear, actionable feedback about what went wrong? - Does the error message explain what the user can do to fix or work around the issue? - Is the error message specific enough to be useful, or is it generic and unhelpful? - Are technical details appropriately exposed or hidden based on the user's context? **Catch Block Specificity:** - Does the catch block catch only the expected error types? - Could this catch block accidentally suppress unrelated errors? - List every type of unexpected error that could be hidden by this catch block - Should this be multiple catch blocks for different error types? **Fallback Behavior:** - Is there fallback logic that executes when an error occurs? - Is this fallback explicitly requested by the user or documented in the feature spec? - Does the fallback behavior mask the underlying problem? - Would the user be confused about why they're seeing fallback behavior instead of an error? - Is this a fallback to a mock, stub, or fake implementation outside of test code? **Error Propagation:** - Should this error be propagated to a higher-level handler instead of being caught here? - Is the error being swallowed when it should bubble up? - Does catching here prevent proper cleanup or resource management? ### 3. 
Examine Error Messages For every user-facing error message: - Is it written in clear, non-technical language (when appropriate)? - Does it explain what went wrong in terms the user understands? - Does it provide actionable next steps? - Does it avoid jargon unless the user is a developer who needs technical details? - Is it specific enough to distinguish this error from similar errors? - Does it include relevant context (file names, operation names, etc.)? ### 4. Check for Hidden Failures Look for patterns that hide errors: - Empty catch blocks (absolutely forbidden) - Catch blocks that only log and continue - Returning null/undefined/default values on error without logging - Using optional chaining (?.) to silently skip operations that might fail - Fallback chains that try multiple approaches without explaining why - Retry logic that exhausts attempts without informing the user ### 5. Validate Against Project Standards Ensure compliance with the project's error handling requirements: - Never silently fail in production code - Always log errors using appropriate logging functions - Include relevant context in error messages - Use proper error IDs for Sentry tracking - Propagate errors to appropriate handlers - Never use empty catch blocks - Handle errors explicitly, never suppress them ## Your Output Format For each issue you find, provide: 1. **Location**: File path and line number(s) 2. **Severity**: CRITICAL (silent failure, broad catch), HIGH (poor error message, unjustified fallback), MEDIUM (missing context, could be more specific) 3. **Issue Description**: What's wrong and why it's problematic 4. **Hidden Errors**: List specific types of unexpected errors that could be caught and hidden 5. **User Impact**: How this affects the user experience and debugging 6. **Recommendation**: Specific code changes needed to fix the issue 7. 
**Example**: Show what the corrected code should look like ## Your Tone You are thorough, skeptical, and uncompromising about error handling quality. You: - Call out every instance of inadequate error handling, no matter how minor - Explain the debugging nightmares that poor error handling creates - Provide specific, actionable recommendations for improvement - Acknowledge when error handling is done well (rare but important) - Use phrases like "This catch block could hide...", "Users will be confused when...", "This fallback masks the real problem..." - Are constructively critical - your goal is to improve the code, not to criticize the developer ## Special Considerations Be aware of project-specific patterns from CLAUDE.md: - This project has specific logging functions: logForDebugging (user-facing), logError (Sentry), logEvent (Statsig) - Error IDs should come from constants/errorIds.ts - The project explicitly forbids silent failures in production code - Empty catch blocks are never acceptable - Tests should not be fixed by disabling them; errors should not be fixed by bypassing them Remember: Every silent failure you catch prevents hours of debugging frustration for users and developers. Be thorough, be skeptical, and never let an error slip through unnoticed. ================================================ FILE: plugins/pr-review-toolkit/agents/type-design-analyzer.md ================================================ --- name: type-design-analyzer description: Use this agent when you need expert analysis of type design in your codebase. Specifically use it: (1) when introducing a new type to ensure it follows best practices for encapsulation and invariant expression, (2) during pull request creation to review all types being added, (3) when refactoring existing types to improve their design quality. 
The agent will provide both qualitative feedback and quantitative ratings on encapsulation, invariant expression, usefulness, and enforcement.\n\n\nContext: Daisy is writing code that introduces a new UserAccount type and wants to ensure it has well-designed invariants.\nuser: "I've just created a new UserAccount type that handles user authentication and permissions"\nassistant: "I'll use the type-design-analyzer agent to review the UserAccount type design"\n\nSince a new type is being introduced, use the type-design-analyzer to ensure it has strong invariants and proper encapsulation.\n\n\n\n\nContext: Daisy is creating a pull request and wants to review all newly added types.\nuser: "I'm about to create a PR with several new data model types"\nassistant: "Let me use the type-design-analyzer agent to review all the types being added in this PR"\n\nDuring PR creation with new types, use the type-design-analyzer to review their design quality.\n\n model: inherit color: pink --- You are a type design expert with extensive experience in large-scale software architecture. Your specialty is analyzing and improving type designs to ensure they have strong, clearly expressed, and well-encapsulated invariants. **Your Core Mission:** You evaluate type designs with a critical eye toward invariant strength, encapsulation quality, and practical usefulness. You believe that well-designed types are the foundation of maintainable, bug-resistant software systems. **Analysis Framework:** When analyzing a type, you will: 1. **Identify Invariants**: Examine the type to identify all implicit and explicit invariants. Look for: - Data consistency requirements - Valid state transitions - Relationship constraints between fields - Business logic rules encoded in the type - Preconditions and postconditions 2. **Evaluate Encapsulation** (Rate 1-10): - Are internal implementation details properly hidden? - Can the type's invariants be violated from outside? 
- Are there appropriate access modifiers? - Is the interface minimal and complete? 3. **Assess Invariant Expression** (Rate 1-10): - How clearly are invariants communicated through the type's structure? - Are invariants enforced at compile-time where possible? - Is the type self-documenting through its design? - Are edge cases and constraints obvious from the type definition? 4. **Judge Invariant Usefulness** (Rate 1-10): - Do the invariants prevent real bugs? - Are they aligned with business requirements? - Do they make the code easier to reason about? - Are they neither too restrictive nor too permissive? 5. **Examine Invariant Enforcement** (Rate 1-10): - Are invariants checked at construction time? - Are all mutation points guarded? - Is it impossible to create invalid instances? - Are runtime checks appropriate and comprehensive? **Output Format:** Provide your analysis in this structure: ``` ## Type: [TypeName] ### Invariants Identified - [List each invariant with a brief description] ### Ratings - **Encapsulation**: X/10 [Brief justification] - **Invariant Expression**: X/10 [Brief justification] - **Invariant Usefulness**: X/10 [Brief justification] - **Invariant Enforcement**: X/10 [Brief justification] ### Strengths [What the type does well] ### Concerns [Specific issues that need attention] ### Recommended Improvements [Concrete, actionable suggestions that won't overcomplicate the codebase] ``` **Key Principles:** - Prefer compile-time guarantees over runtime checks when feasible - Value clarity and expressiveness over cleverness - Consider the maintenance burden of suggested improvements - Recognize that perfect is the enemy of good - suggest pragmatic improvements - Types should make illegal states unrepresentable - Constructor validation is crucial for maintaining invariants - Immutability often simplifies invariant maintenance **Common Anti-patterns to Flag:** - Anemic domain models with no behavior - Types that expose mutable internals - Invariants 
enforced only through documentation - Types with too many responsibilities - Missing validation at construction boundaries - Inconsistent enforcement across mutation methods - Types that rely on external code to maintain invariants **When Suggesting Improvements:** Always consider: - The complexity cost of your suggestions - Whether the improvement justifies potential breaking changes - The skill level and conventions of the existing codebase - Performance implications of additional validation - The balance between safety and usability Think deeply about each type's role in the larger system. Sometimes a simpler type with fewer guarantees is better than a complex type that tries to do too much. Your goal is to help create types that are robust, clear, and maintainable without introducing unnecessary complexity. ================================================ FILE: plugins/pr-review-toolkit/commands/review-pr.md ================================================ --- description: "Comprehensive PR review using specialized agents" argument-hint: "[review-aspects]" allowed-tools: ["Bash", "Glob", "Grep", "Read", "Task"] --- # Comprehensive PR Review Run a comprehensive pull request review using multiple specialized agents, each focusing on a different aspect of code quality. **Review Aspects (optional):** "$ARGUMENTS" ## Review Workflow: 1. **Determine Review Scope** - Check git status to identify changed files - Parse arguments to see if user requested specific review aspects - Default: Run all applicable reviews 2. **Available Review Aspects:** - **comments** - Analyze code comment accuracy and maintainability - **tests** - Review test coverage quality and completeness - **errors** - Check error handling for silent failures - **types** - Analyze type design and invariants (if new types added) - **code** - General code review for project guidelines - **simplify** - Simplify code for clarity and maintainability - **all** - Run all applicable reviews (default) 3. 
**Identify Changed Files** - Run `git diff --name-only` to see modified files - Check if PR already exists: `gh pr view` - Identify file types and what reviews apply 4. **Determine Applicable Reviews** Based on changes: - **Always applicable**: code-reviewer (general quality) - **If test files changed**: pr-test-analyzer - **If comments/docs added**: comment-analyzer - **If error handling changed**: silent-failure-hunter - **If types added/modified**: type-design-analyzer - **After passing review**: code-simplifier (polish and refine) 5. **Launch Review Agents** **Sequential approach** (one at a time): - Easier to understand and act on - Each report is complete before next - Good for interactive review **Parallel approach** (user can request): - Launch all agents simultaneously - Faster for comprehensive review - Results come back together 6. **Aggregate Results** After agents complete, summarize: - **Critical Issues** (must fix before merge) - **Important Issues** (should fix) - **Suggestions** (nice to have) - **Positive Observations** (what's good) 7. **Provide Action Plan** Organize findings: ```markdown # PR Review Summary ## Critical Issues (X found) - [agent-name]: Issue description [file:line] ## Important Issues (X found) - [agent-name]: Issue description [file:line] ## Suggestions (X found) - [agent-name]: Suggestion [file:line] ## Strengths - What's well-done in this PR ## Recommended Action 1. Fix critical issues first 2. Address important issues 3. Consider suggestions 4. 
Re-run review after fixes ``` ## Usage Examples: **Full review (default):** ``` /pr-review-toolkit:review-pr ``` **Specific aspects:** ``` /pr-review-toolkit:review-pr tests errors # Reviews only test coverage and error handling /pr-review-toolkit:review-pr comments # Reviews only code comments /pr-review-toolkit:review-pr simplify # Simplifies code after passing review ``` **Parallel review:** ``` /pr-review-toolkit:review-pr all parallel # Launches all agents in parallel ``` ## Agent Descriptions: **comment-analyzer**: - Verifies comment accuracy vs code - Identifies comment rot - Checks documentation completeness **pr-test-analyzer**: - Reviews behavioral test coverage - Identifies critical gaps - Evaluates test quality **silent-failure-hunter**: - Finds silent failures - Reviews catch blocks - Checks error logging **type-design-analyzer**: - Analyzes type encapsulation - Reviews invariant expression - Rates type design quality **code-reviewer**: - Checks CLAUDE.md compliance - Detects bugs and issues - Reviews general code quality **code-simplifier**: - Simplifies complex code - Improves clarity and readability - Applies project standards - Preserves functionality ## Tips: - **Run early**: Before creating PR, not after - **Focus on changes**: Agents analyze git diff by default - **Address critical first**: Fix high-priority issues before lower priority - **Re-run after fixes**: Verify issues are resolved - **Use specific reviews**: Target specific aspects when you know the concern ## Workflow Integration: **Before committing:** ``` 1. Write code 2. Run: /pr-review-toolkit:review-pr code errors 3. Fix any critical issues 4. Commit ``` **Before creating PR:** ``` 1. Stage all changes 2. Run: /pr-review-toolkit:review-pr all 3. Address all critical and important issues 4. Run specific reviews again to verify 5. Create PR ``` **After PR feedback:** ``` 1. Make requested changes 2. Run targeted reviews based on feedback 3. Verify issues are resolved 4. 
Push updates ``` ## Notes: - Agents run autonomously and return detailed reports - Each agent focuses on its specialty for deep analysis - Results are actionable with specific file:line references - Agents use appropriate models for their complexity - All agents available in `/agents` list ================================================ FILE: plugins/ralph-wiggum/.claude-plugin/plugin.json ================================================ { "name": "ralph-wiggum", "version": "1.0.0", "description": "Implementation of the Ralph Wiggum technique - continuous self-referential AI loops for interactive iterative development. Run Claude in a while-true loop with the same prompt until task completion.", "author": { "name": "Daisy Hollman", "email": "daisy@anthropic.com" } } ================================================ FILE: plugins/ralph-wiggum/README.md ================================================ # Ralph Wiggum Plugin Implementation of the Ralph Wiggum technique for iterative, self-referential AI development loops in Claude Code. ## What is Ralph? Ralph is a development methodology based on continuous AI agent loops. As Geoffrey Huntley describes it: **"Ralph is a Bash loop"** - a simple `while true` that repeatedly feeds an AI agent a prompt file, allowing it to iteratively improve its work until completion. The technique is named after Ralph Wiggum from The Simpsons, embodying the philosophy of persistent iteration despite setbacks. ### Core Concept This plugin implements Ralph using a **Stop hook** that intercepts Claude's exit attempts: ```bash # You run ONCE: /ralph-loop "Your task description" --completion-promise "DONE" # Then Claude Code automatically: # 1. Works on the task # 2. Tries to exit # 3. Stop hook blocks exit # 4. Stop hook feeds the SAME prompt back # 5. Repeat until completion ``` The loop happens **inside your current session** - you don't need external bash loops. 
The Stop hook in `hooks/stop-hook.sh` creates the self-referential feedback loop by blocking normal session exit. This creates a **self-referential feedback loop** where: - The prompt never changes between iterations - Claude's previous work persists in files - Each iteration sees modified files and git history - Claude autonomously improves by reading its own past work in files ## Quick Start ```bash /ralph-loop "Build a REST API for todos. Requirements: CRUD operations, input validation, tests. Output COMPLETE when done." --completion-promise "COMPLETE" --max-iterations 50 ``` Claude will: - Implement the API iteratively - Run tests and see failures - Fix bugs based on test output - Iterate until all requirements met - Output the completion promise when done ## Commands ### /ralph-loop Start a Ralph loop in your current session. **Usage:** ```bash /ralph-loop "<prompt>" --max-iterations <N> --completion-promise "<phrase>" ``` **Options:** - `--max-iterations <N>` - Stop after N iterations (default: unlimited) - `--completion-promise <phrase>` - Phrase that signals completion ### /cancel-ralph Cancel the active Ralph loop. **Usage:** ```bash /cancel-ralph ``` ## Prompt Writing Best Practices ### 1. Clear Completion Criteria ❌ Bad: "Build a todo API and make it good." ✅ Good: ```markdown Build a REST API for todos. When complete: - All CRUD endpoints working - Input validation in place - Tests passing (coverage > 80%) - README with API docs - Output: COMPLETE ``` ### 2. Incremental Goals ❌ Bad: "Create a complete e-commerce platform." ✅ Good: ```markdown Phase 1: User authentication (JWT, tests) Phase 2: Product catalog (list/search, tests) Phase 3: Shopping cart (add/remove, tests) Output COMPLETE when all phases done. ``` ### 3. Self-Correction ❌ Bad: "Write code for feature X." ✅ Good: ```markdown Implement feature X following TDD: 1. Write failing tests 2. Implement feature 3. Run tests 4. If any fail, debug and fix 5. Refactor if needed 6. Repeat until all green 7. 
Output: COMPLETE ``` ### 4. Escape Hatches Always use `--max-iterations` as a safety net to prevent infinite loops on impossible tasks: ```bash # Recommended: Always set a reasonable iteration limit /ralph-loop "Try to implement feature X" --max-iterations 20 # In your prompt, include what to do if stuck: # "After 15 iterations, if not complete: # - Document what's blocking progress # - List what was attempted # - Suggest alternative approaches" ``` **Note**: The `--completion-promise` uses exact string matching, so you cannot use it for multiple completion conditions (like "SUCCESS" vs "BLOCKED"). Always rely on `--max-iterations` as your primary safety mechanism. ## Philosophy Ralph embodies several key principles: ### 1. Iteration > Perfection Don't aim for perfect on first try. Let the loop refine the work. ### 2. Failures Are Data "Deterministically bad" means failures are predictable and informative. Use them to tune prompts. ### 3. Operator Skill Matters Success depends on writing good prompts, not just having a good model. ### 4. Persistence Wins Keep trying until success. The loop handles retry logic automatically. 
## When to Use Ralph **Good for:** - Well-defined tasks with clear success criteria - Tasks requiring iteration and refinement (e.g., getting tests to pass) - Greenfield projects where you can walk away - Tasks with automatic verification (tests, linters) **Not good for:** - Tasks requiring human judgment or design decisions - One-shot operations - Tasks with unclear success criteria - Production debugging (use targeted debugging instead) ## Real-World Results - Successfully generated 6 repositories overnight in Y Combinator hackathon testing - One $50k contract completed for $297 in API costs - Created entire programming language ("cursed") over 3 months using this approach ## Learn More - Original technique: https://ghuntley.com/ralph/ - Ralph Orchestrator: https://github.com/mikeyobrien/ralph-orchestrator ## For Help Run `/help` in Claude Code for detailed command reference and examples. ================================================ FILE: plugins/ralph-wiggum/commands/cancel-ralph.md ================================================ --- description: "Cancel active Ralph Wiggum loop" allowed-tools: ["Bash(test -f .claude/ralph-loop.local.md:*)", "Bash(rm .claude/ralph-loop.local.md)", "Read(.claude/ralph-loop.local.md)"] hide-from-slash-command-tool: "true" --- # Cancel Ralph To cancel the Ralph loop: 1. Check if `.claude/ralph-loop.local.md` exists using Bash: `test -f .claude/ralph-loop.local.md && echo "EXISTS" || echo "NOT_FOUND"` 2. **If NOT_FOUND**: Say "No active Ralph loop found." 3. 
**If EXISTS**:
   - Read `.claude/ralph-loop.local.md` to get the current iteration number from the `iteration:` field
   - Remove the file using Bash: `rm .claude/ralph-loop.local.md`
   - Report: "Cancelled Ralph loop (was at iteration N)" where N is the iteration value



================================================
FILE: plugins/ralph-wiggum/commands/help.md
================================================
---
description: "Explain Ralph Wiggum technique and available commands"
---

# Ralph Wiggum Plugin Help

Please explain the following to the user:

## What is the Ralph Wiggum Technique?

The Ralph Wiggum technique is an iterative development methodology based on continuous AI loops, pioneered by Geoffrey Huntley.

**Core concept:**

```bash
while :; do
  cat PROMPT.md | claude-code --continue
done
```

The same prompt is fed to Claude repeatedly. The "self-referential" aspect comes from Claude seeing its own previous work in the files and git history, not from feeding output back as input.

**Each iteration:**

1. Claude receives the SAME prompt
2. Works on the task, modifying files
3. Tries to exit
4. Stop hook intercepts and feeds the same prompt again
5. Claude sees its previous work in the files
6. Iteratively improves until completion

The technique is described as "deterministically bad in an undeterministic world" - failures are predictable, enabling systematic improvement through prompt tuning.

## Available Commands

### /ralph-loop <PROMPT> [OPTIONS]

Start a Ralph loop in your current session.

**Usage:**

```
/ralph-loop "Refactor the cache layer" --max-iterations 20
/ralph-loop "Add tests" --completion-promise "TESTS COMPLETE"
```

**Options:**

- `--max-iterations <N>` - Max iterations before auto-stop
- `--completion-promise <TEXT>` - Promise phrase to signal completion

**How it works:**

1. Creates `.claude/ralph-loop.local.md` state file
2. You work on the task
3. When you try to exit, stop hook intercepts
4. Same prompt fed back
5. You see your previous work
6.
Continues until promise detected or max iterations

---

### /cancel-ralph

Cancel an active Ralph loop (removes the loop state file).

**Usage:**

```
/cancel-ralph
```

**How it works:**

- Checks for active loop state file
- Removes `.claude/ralph-loop.local.md`
- Reports cancellation with iteration count

---

## Key Concepts

### Completion Promises

To signal completion, Claude must output a `<promise>` tag:

```
<promise>TASK COMPLETE</promise>
```

The stop hook looks for this specific tag. Without it (or `--max-iterations`), Ralph runs infinitely.

### Self-Reference Mechanism

The "loop" doesn't mean Claude talks to itself. It means:

- Same prompt repeated
- Claude's work persists in files
- Each iteration sees previous attempts
- Builds incrementally toward goal

## Example

### Interactive Bug Fix

```
/ralph-loop "Fix the token refresh logic in auth.ts. Output FIXED when all tests pass." --completion-promise "FIXED" --max-iterations 10
```

You'll see Ralph:

- Attempt fixes
- Run tests
- See failures
- Iterate on solution
- In your current session

## When to Use Ralph

**Good for:**

- Well-defined tasks with clear success criteria
- Tasks requiring iteration and refinement
- Iterative development with self-correction
- Greenfield projects

**Not good for:**

- Tasks requiring human judgment or design decisions
- One-shot operations
- Tasks with unclear success criteria
- Debugging production issues (use targeted debugging instead)

## Learn More

- Original technique: https://ghuntley.com/ralph/
- Ralph Orchestrator: https://github.com/mikeyobrien/ralph-orchestrator



================================================
FILE: plugins/ralph-wiggum/commands/ralph-loop.md
================================================
---
description: "Start Ralph Wiggum loop in current session"
argument-hint: "PROMPT [--max-iterations N] [--completion-promise TEXT]"
allowed-tools: ["Bash(${CLAUDE_PLUGIN_ROOT}/scripts/setup-ralph-loop.sh:*)"]
hide-from-slash-command-tool: "true"
---

# Ralph Loop Command

Execute the setup
script to initialize the Ralph loop: ```! "${CLAUDE_PLUGIN_ROOT}/scripts/setup-ralph-loop.sh" $ARGUMENTS ``` Please work on the task. When you try to exit, the Ralph loop will feed the SAME PROMPT back to you for the next iteration. You'll see your previous work in files and git history, allowing you to iterate and improve. CRITICAL RULE: If a completion promise is set, you may ONLY output it when the statement is completely and unequivocally TRUE. Do not output false promises to escape the loop, even if you think you're stuck or should exit for other reasons. The loop is designed to continue until genuine completion. ================================================ FILE: plugins/ralph-wiggum/hooks/hooks.json ================================================ { "description": "Ralph Wiggum plugin stop hook for self-referential loops", "hooks": { "Stop": [ { "hooks": [ { "type": "command", "command": "${CLAUDE_PLUGIN_ROOT}/hooks/stop-hook.sh" } ] } ] } } ================================================ FILE: plugins/ralph-wiggum/hooks/stop-hook.sh ================================================ #!/bin/bash # Ralph Wiggum Stop Hook # Prevents session exit when a ralph-loop is active # Feeds Claude's output back as input to continue the loop set -euo pipefail # Read hook input from stdin (advanced stop hook API) HOOK_INPUT=$(cat) # Check if ralph-loop is active RALPH_STATE_FILE=".claude/ralph-loop.local.md" if [[ ! 
-f "$RALPH_STATE_FILE" ]]; then
  # No active ralph-loop - allow normal session exit.
  exit 0
fi

# Parse the YAML frontmatter (text between the first pair of --- markers)
# and extract the fields this hook needs.
FRONTMATTER=$(sed -n '/^---$/,/^---$/{ /^---$/d; p; }' "$RALPH_STATE_FILE")
ITERATION=$(echo "$FRONTMATTER" | grep '^iteration:' | sed 's/iteration: *//')
MAX_ITERATIONS=$(echo "$FRONTMATTER" | grep '^max_iterations:' | sed 's/max_iterations: *//')
# completion_promise may be quoted in YAML; strip one surrounding pair of quotes.
COMPLETION_PROMISE=$(echo "$FRONTMATTER" | grep '^completion_promise:' | sed 's/completion_promise: *//' | sed 's/^"\(.*\)"$/\1/')

# Validate numeric fields before arithmetic; a corrupted state file should stop
# the loop cleanly rather than crash the hook under `set -e`.
if [[ ! "$ITERATION" =~ ^[0-9]+$ ]]; then
  echo "⚠️ Ralph loop: State file corrupted" >&2
  echo " File: $RALPH_STATE_FILE" >&2
  echo " Problem: 'iteration' field is not a valid number (got: '$ITERATION')" >&2
  echo "" >&2
  echo " This usually means the state file was manually edited or corrupted." >&2
  echo " Ralph loop is stopping. Run /ralph-loop again to start fresh." >&2
  rm "$RALPH_STATE_FILE"
  exit 0
fi

if [[ ! "$MAX_ITERATIONS" =~ ^[0-9]+$ ]]; then
  echo "⚠️ Ralph loop: State file corrupted" >&2
  echo " File: $RALPH_STATE_FILE" >&2
  echo " Problem: 'max_iterations' field is not a valid number (got: '$MAX_ITERATIONS')" >&2
  echo "" >&2
  echo " This usually means the state file was manually edited or corrupted." >&2
  echo " Ralph loop is stopping. Run /ralph-loop again to start fresh." >&2
  rm "$RALPH_STATE_FILE"
  exit 0
fi

# Stop when the iteration budget is exhausted (0 means unlimited).
if [[ $MAX_ITERATIONS -gt 0 ]] && [[ $ITERATION -ge $MAX_ITERATIONS ]]; then
  echo "🛑 Ralph loop: Max iterations ($MAX_ITERATIONS) reached."
  rm "$RALPH_STATE_FILE"
  exit 0
fi

# Locate the session transcript (JSONL) from the hook's stdin payload.
TRANSCRIPT_PATH=$(echo "$HOOK_INPUT" | jq -r '.transcript_path')

if [[ ! -f "$TRANSCRIPT_PATH" ]]; then
  echo "⚠️ Ralph loop: Transcript file not found" >&2
  echo " Expected: $TRANSCRIPT_PATH" >&2
  echo " This is unusual and may indicate a Claude Code internal issue." >&2
  echo " Ralph loop is stopping." >&2
  rm "$RALPH_STATE_FILE"
  exit 0
fi

# Read last assistant message from transcript (JSONL format - one JSON per line)
# First check if there are any assistant messages
if ! grep -q '"role":"assistant"' "$TRANSCRIPT_PATH"; then
  echo "⚠️ Ralph loop: No assistant messages found in transcript" >&2
  echo " Transcript: $TRANSCRIPT_PATH" >&2
  echo " This is unusual and may indicate a transcript format issue" >&2
  echo " Ralph loop is stopping." >&2
  rm "$RALPH_STATE_FILE"
  exit 0
fi

# Extract last assistant message with explicit error handling
LAST_LINE=$(grep '"role":"assistant"' "$TRANSCRIPT_PATH" | tail -1)

if [[ -z "$LAST_LINE" ]]; then
  echo "⚠️ Ralph loop: Failed to extract last assistant message" >&2
  echo " Ralph loop is stopping." >&2
  rm "$RALPH_STATE_FILE"
  exit 0
fi

# Concatenate the text blocks of the last assistant message.
# BUGFIX: the original checked `$? -ne 0` AFTER the assignment, but under
# `set -e` a failing command substitution aborts the script before that check
# ever runs, making it dead code. Guard the assignment directly instead.
if ! LAST_OUTPUT=$(echo "$LAST_LINE" | jq -r '
  .message.content | map(select(.type == "text")) | map(.text) | join("\n")
' 2>&1); then
  echo "⚠️ Ralph loop: Failed to parse assistant message JSON" >&2
  echo " Error: $LAST_OUTPUT" >&2
  echo " This may indicate a transcript format issue" >&2
  echo " Ralph loop is stopping." >&2
  rm "$RALPH_STATE_FILE"
  exit 0
fi

if [[ -z "$LAST_OUTPUT" ]]; then
  echo "⚠️ Ralph loop: Assistant message contained no text content" >&2
  echo " Ralph loop is stopping." >&2
  rm "$RALPH_STATE_FILE"
  exit 0
fi

# Check for completion promise (only if set)
if [[ "$COMPLETION_PROMISE" != "null" ]] && [[ -n "$COMPLETION_PROMISE" ]]; then
  # Extract text between <promise> tags using Perl for multiline support:
  # -0777 slurps the entire input, the /s flag makes . match newlines, and
  # .*? is non-greedy (takes the FIRST tag pair); whitespace is normalized.
  # (The opening <promise> in the regex was restored; extraction had eaten it,
  # as the surviving <\/promise> closing tag shows.)
  PROMISE_TEXT=$(echo "$LAST_OUTPUT" | perl -0777 -pe 's/.*?<promise>(.*?)<\/promise>.*/$1/s; s/^\s+|\s+$//g; s/\s+/ /g' 2>/dev/null || echo "")

  # Use = for literal string comparison (not pattern matching):
  # == inside [[ ]] does glob matching, which breaks with *, ?, [ characters.
  if [[ -n "$PROMISE_TEXT" ]] && [[ "$PROMISE_TEXT" = "$COMPLETION_PROMISE" ]]; then
    echo "✅ Ralph loop: Detected $COMPLETION_PROMISE"
    rm "$RALPH_STATE_FILE"
    exit 0
  fi
fi

# Not complete - continue loop with SAME PROMPT
NEXT_ITERATION=$((ITERATION + 1))

# The prompt is everything after the second --- marker. Using i>=2 (not i==2)
# keeps --- lines inside the prompt body from truncating it.
PROMPT_TEXT=$(awk '/^---$/{i++; next} i>=2' "$RALPH_STATE_FILE")

if [[ -z "$PROMPT_TEXT" ]]; then
  echo "⚠️ Ralph loop: State file corrupted or incomplete" >&2
  echo " File: $RALPH_STATE_FILE" >&2
  echo " Problem: No prompt text found" >&2
  echo "" >&2
  echo " This usually means:" >&2
  echo " • State file was manually edited" >&2
  echo " • File was corrupted during writing" >&2
  echo "" >&2
  echo " Ralph loop is stopping. Run /ralph-loop again to start fresh."
>&2
rm "$RALPH_STATE_FILE"
exit 0
fi

# Update iteration in frontmatter (portable across macOS and Linux):
# sed -i differs between BSD and GNU, so write a temp file, then atomically
# replace the state file with mv.
TEMP_FILE="${RALPH_STATE_FILE}.tmp.$$"
sed "s/^iteration: .*/iteration: $NEXT_ITERATION/" "$RALPH_STATE_FILE" > "$TEMP_FILE"
mv "$TEMP_FILE" "$RALPH_STATE_FILE"

# Build system message with iteration count and completion promise info
# NOTE(review): extraction may have dropped <promise> tags around
# $COMPLETION_PROMISE inside this message - verify against the original script.
if [[ "$COMPLETION_PROMISE" != "null" ]] && [[ -n "$COMPLETION_PROMISE" ]]; then
  SYSTEM_MSG="🔄 Ralph iteration $NEXT_ITERATION | To stop: output $COMPLETION_PROMISE (ONLY when statement is TRUE - do not lie to exit!)"
else
  SYSTEM_MSG="🔄 Ralph iteration $NEXT_ITERATION | No completion promise set - loop runs infinitely"
fi

# Output JSON to block the stop and feed prompt back
# The "reason" field contains the prompt that will be sent back to Claude
jq -n \
  --arg prompt "$PROMPT_TEXT" \
  --arg msg "$SYSTEM_MSG" \
  '{
    "decision": "block",
    "reason": $prompt,
    "systemMessage": $msg
  }'

# Exit 0 for successful hook execution
exit 0



================================================
FILE: plugins/ralph-wiggum/scripts/setup-ralph-loop.sh
================================================
#!/bin/bash
# Ralph Loop Setup Script
# Creates state file for in-session Ralph loop
# (the state file is read back by hooks/stop-hook.sh on every Stop event)

set -euo pipefail

# Parse arguments
PROMPT_PARTS=()
MAX_ITERATIONS=0
COMPLETION_PROMISE="null"

# Parse options and positional arguments
while [[ $# -gt 0 ]]; do
  case $1 in
    -h|--help)
      # Quoted delimiter: no variable expansion inside the help text.
      cat << 'HELP_EOF'
Ralph Loop - Interactive self-referential development loop

USAGE:
  /ralph-loop [PROMPT...] [OPTIONS]

ARGUMENTS:
  PROMPT...  Initial prompt to start the loop (can be multiple words without quotes)

OPTIONS:
  --max-iterations         Maximum iterations before auto-stop (default: unlimited)
  --completion-promise ''  Promise phrase (USE QUOTES for multi-word)
  -h, --help               Show this help message

DESCRIPTION:
  Starts a Ralph Wiggum loop in your CURRENT session.

  The stop hook prevents exit and feeds your output back as input
  until completion or iteration limit.
To signal completion, you must output: YOUR_PHRASE Use this for: - Interactive iteration where you want to see progress - Tasks requiring self-correction and refinement - Learning how Ralph works EXAMPLES: /ralph-loop Build a todo API --completion-promise 'DONE' --max-iterations 20 /ralph-loop --max-iterations 10 Fix the auth bug /ralph-loop Refactor cache layer (runs forever) /ralph-loop --completion-promise 'TASK COMPLETE' Create a REST API STOPPING: Only by reaching --max-iterations or detecting --completion-promise No manual stop - Ralph runs infinitely by default! MONITORING: # View current iteration: grep '^iteration:' .claude/ralph-loop.local.md # View full state: head -10 .claude/ralph-loop.local.md HELP_EOF exit 0 ;; --max-iterations) if [[ -z "${2:-}" ]]; then echo "❌ Error: --max-iterations requires a number argument" >&2 echo "" >&2 echo " Valid examples:" >&2 echo " --max-iterations 10" >&2 echo " --max-iterations 50" >&2 echo " --max-iterations 0 (unlimited)" >&2 echo "" >&2 echo " You provided: --max-iterations (with no number)" >&2 exit 1 fi if ! [[ "$2" =~ ^[0-9]+$ ]]; then echo "❌ Error: --max-iterations must be a positive integer or 0, got: $2" >&2 echo "" >&2 echo " Valid examples:" >&2 echo " --max-iterations 10" >&2 echo " --max-iterations 50" >&2 echo " --max-iterations 0 (unlimited)" >&2 echo "" >&2 echo " Invalid: decimals (10.5), negative numbers (-5), text" >&2 exit 1 fi MAX_ITERATIONS="$2" shift 2 ;; --completion-promise) if [[ -z "${2:-}" ]]; then echo "❌ Error: --completion-promise requires a text argument" >&2 echo "" >&2 echo " Valid examples:" >&2 echo " --completion-promise 'DONE'" >&2 echo " --completion-promise 'TASK COMPLETE'" >&2 echo " --completion-promise 'All tests passing'" >&2 echo "" >&2 echo " You provided: --completion-promise (with no text)" >&2 echo "" >&2 echo " Note: Multi-word promises must be quoted!" 
>&2 exit 1 fi COMPLETION_PROMISE="$2" shift 2 ;; *) # Non-option argument - collect all as prompt parts PROMPT_PARTS+=("$1") shift ;; esac done # Join all prompt parts with spaces PROMPT="${PROMPT_PARTS[*]}" # Validate prompt is non-empty if [[ -z "$PROMPT" ]]; then echo "❌ Error: No prompt provided" >&2 echo "" >&2 echo " Ralph needs a task description to work on." >&2 echo "" >&2 echo " Examples:" >&2 echo " /ralph-loop Build a REST API for todos" >&2 echo " /ralph-loop Fix the auth bug --max-iterations 20" >&2 echo " /ralph-loop --completion-promise 'DONE' Refactor code" >&2 echo "" >&2 echo " For all options: /ralph-loop --help" >&2 exit 1 fi # Create state file for stop hook (markdown with YAML frontmatter) mkdir -p .claude # Quote completion promise for YAML if it contains special chars or is not null if [[ -n "$COMPLETION_PROMISE" ]] && [[ "$COMPLETION_PROMISE" != "null" ]]; then COMPLETION_PROMISE_YAML="\"$COMPLETION_PROMISE\"" else COMPLETION_PROMISE_YAML="null" fi cat > .claude/ralph-loop.local.md <$COMPLETION_PROMISE" echo "" echo "STRICT REQUIREMENTS (DO NOT VIOLATE):" echo " ✓ Use XML tags EXACTLY as shown above" echo " ✓ The statement MUST be completely and unequivocally TRUE" echo " ✓ Do NOT output false statements to exit the loop" echo " ✓ Do NOT lie even if you think you should exit" echo "" echo "IMPORTANT - Do not circumvent the loop:" echo " Even if you believe you're stuck, the task is impossible," echo " or you've been running too long - you MUST NOT output a" echo " false promise statement. The loop is designed to continue" echo " until the promise is GENUINELY TRUE. Trust the process." echo "" echo " If the loop should stop, the promise statement will become" echo " true naturally. Do not force it by lying." 
echo "═══════════════════════════════════════════════════════════" fi ================================================ FILE: plugins/security-guidance/.claude-plugin/plugin.json ================================================ { "name": "security-guidance", "version": "1.0.0", "description": "Security reminder hook that warns about potential security issues when editing files, including command injection, XSS, and unsafe code patterns", "author": { "name": "David Dworken", "email": "dworken@anthropic.com" } } ================================================ FILE: plugins/security-guidance/hooks/hooks.json ================================================ { "description": "Security reminder hook that warns about potential security issues when editing files", "hooks": { "PreToolUse": [ { "hooks": [ { "type": "command", "command": "python3 ${CLAUDE_PLUGIN_ROOT}/hooks/security_reminder_hook.py" } ], "matcher": "Edit|Write|MultiEdit" } ] } } ================================================ FILE: plugins/security-guidance/hooks/security_reminder_hook.py ================================================ #!/usr/bin/env python3 """ Security Reminder Hook for Claude Code This hook checks for security patterns in file edits and warns about potential vulnerabilities. 
"""

import json
import os
import random
import sys
from datetime import datetime

# Debug log file (best-effort diagnostics; failures here never block the hook)
DEBUG_LOG_FILE = "/tmp/security-warnings-log.txt"


def debug_log(message):
    """Append debug message to log file with timestamp."""
    try:
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
        with open(DEBUG_LOG_FILE, "a") as f:
            f.write(f"[{timestamp}] {message}\n")
    except Exception as e:
        # Silently ignore logging errors to avoid disrupting the hook
        pass


# State file to track warnings shown (session-scoped using session ID)
# Security patterns configuration.
# Each rule carries a "ruleName" plus either a "path_check" predicate (applied
# to the edited file's path) or a "substrings" list (matched against the new
# content), and the "reminder" text emitted when the rule fires.
SECURITY_PATTERNS = [
    {
        "ruleName": "github_actions_workflow",
        # Fires for any YAML file under .github/workflows/
        "path_check": lambda path: ".github/workflows/" in path
        and (path.endswith(".yml") or path.endswith(".yaml")),
        "reminder": """You are editing a GitHub Actions workflow file. Be aware of these security risks:

1. **Command Injection**: Never use untrusted input (like issue titles, PR descriptions, commit messages) directly in run: commands without proper escaping
2. **Use environment variables**: Instead of ${{ github.event.issue.title }}, use env: with proper quoting
3. **Review the guide**: https://github.blog/security/vulnerability-research/how-to-catch-github-actions-workflow-injections-before-attackers-do/

Example of UNSAFE pattern to avoid:
  run: echo "${{ github.event.issue.title }}"

Example of SAFE pattern:
  env:
    TITLE: ${{ github.event.issue.title }}
  run: echo "$TITLE"

Other risky inputs to be careful with:
- github.event.issue.body
- github.event.pull_request.title
- github.event.pull_request.body
- github.event.comment.body
- github.event.review.body
- github.event.review_comment.body
- github.event.pages.*.page_name
- github.event.commits.*.message
- github.event.head_commit.message
- github.event.head_commit.author.email
- github.event.head_commit.author.name
- github.event.commits.*.author.email
- github.event.commits.*.author.name
- github.event.pull_request.head.ref
- github.event.pull_request.head.label
- github.event.pull_request.head.repo.default_branch
- github.head_ref""",
    },
    {
        "ruleName": "child_process_exec",
        "substrings": ["child_process.exec", "exec(", "execSync("],
        "reminder": """⚠️ Security Warning: Using child_process.exec() can lead to command injection vulnerabilities.

This codebase provides a safer alternative: src/utils/execFileNoThrow.ts

Instead of:
  exec(`command ${userInput}`)

Use:
  import { execFileNoThrow } from '../utils/execFileNoThrow.js'
  await execFileNoThrow('command', [userInput])

The execFileNoThrow utility:
- Uses execFile instead of exec (prevents shell injection)
- Handles Windows compatibility automatically
- Provides proper error handling
- Returns structured output with stdout, stderr, and status

Only use exec() if you absolutely need shell features and the input is guaranteed to be safe.""",
    },
    {
        "ruleName": "new_function_injection",
        "substrings": ["new Function"],
        "reminder": "⚠️ Security Warning: Using new Function() with dynamic strings can lead to code injection vulnerabilities. Consider alternative approaches that don't evaluate arbitrary code. Only use new Function() if you truly need to evaluate arbitrary dynamic code.",
    },
    {
        "ruleName": "eval_injection",
        "substrings": ["eval("],
        "reminder": "⚠️ Security Warning: eval() executes arbitrary code and is a major security risk. Consider using JSON.parse() for data parsing or alternative design patterns that don't require code evaluation. Only use eval() if you truly need to evaluate arbitrary code.",
    },
    {
        "ruleName": "react_dangerously_set_html",
        "substrings": ["dangerouslySetInnerHTML"],
        "reminder": "⚠️ Security Warning: dangerouslySetInnerHTML can lead to XSS vulnerabilities if used with untrusted content. Ensure all content is properly sanitized using an HTML sanitizer library like DOMPurify, or use safe alternatives.",
    },
    {
        "ruleName": "document_write_xss",
        "substrings": ["document.write"],
        "reminder": "⚠️ Security Warning: document.write() can be exploited for XSS attacks and has performance issues. Use DOM manipulation methods like createElement() and appendChild() instead.",
    },
    {
        "ruleName": "innerHTML_xss",
        "substrings": [".innerHTML =", ".innerHTML="],
        "reminder": "⚠️ Security Warning: Setting innerHTML with untrusted content can lead to XSS vulnerabilities. Use textContent for plain text or safe DOM methods for HTML content. If you need HTML support, consider using an HTML sanitizer library such as DOMPurify.",
    },
    {
        "ruleName": "pickle_deserialization",
        "substrings": ["pickle"],
        "reminder": "⚠️ Security Warning: Using pickle with untrusted content can lead to arbitrary code execution. Consider using JSON or other safe serialization formats instead. Only use pickle if it is explicitly needed or requested by the user.",
    },
    {
        "ruleName": "os_system_injection",
        "substrings": ["os.system", "from os import system"],
        "reminder": "⚠️ Security Warning: This code appears to use os.system.
This should only be used with static arguments and never with arguments that could be user-controlled.",
    },
]


def get_state_file(session_id):
    """Get session-specific state file path."""
    return os.path.expanduser(f"~/.claude/security_warnings_state_{session_id}.json")


def cleanup_old_state_files():
    """Remove state files older than 30 days.

    Best-effort housekeeping: all errors are swallowed so cleanup can never
    interfere with the hook's main job.
    """
    try:
        state_dir = os.path.expanduser("~/.claude")
        if not os.path.exists(state_dir):
            return
        current_time = datetime.now().timestamp()
        thirty_days_ago = current_time - (30 * 24 * 60 * 60)
        for filename in os.listdir(state_dir):
            if filename.startswith("security_warnings_state_") and filename.endswith(
                ".json"
            ):
                file_path = os.path.join(state_dir, filename)
                try:
                    file_mtime = os.path.getmtime(file_path)
                    if file_mtime < thirty_days_ago:
                        os.remove(file_path)
                except (OSError, IOError):
                    pass  # Ignore errors for individual file cleanup
    except Exception:
        pass  # Silently ignore cleanup errors


def load_state(session_id):
    """Load the state of shown warnings from file.

    Returns a set of warning keys; empty set when the file is missing or
    unreadable (a corrupt state file must never block the hook).
    """
    state_file = get_state_file(session_id)
    if os.path.exists(state_file):
        try:
            with open(state_file, "r") as f:
                return set(json.load(f))
        except (json.JSONDecodeError, IOError):
            return set()
    return set()


def save_state(session_id, shown_warnings):
    """Save the state of shown warnings to file."""
    state_file = get_state_file(session_id)
    try:
        os.makedirs(os.path.dirname(state_file), exist_ok=True)
        with open(state_file, "w") as f:
            json.dump(list(shown_warnings), f)
    except IOError as e:
        debug_log(f"Failed to save state file: {e}")
        pass  # Fail silently if we can't save state


def check_patterns(file_path, content):
    """Check if file path or content matches any security patterns.

    Returns (ruleName, reminder) for the FIRST matching pattern, or
    (None, None) when nothing matches.
    """
    # Normalize path by removing leading slashes
    normalized_path = file_path.lstrip("/")
    for pattern in SECURITY_PATTERNS:
        # Check path-based patterns
        if "path_check" in pattern and pattern["path_check"](normalized_path):
            return pattern["ruleName"], pattern["reminder"]
        # Check content-based patterns
        if "substrings" in pattern and content:
            for substring in pattern["substrings"]:
                if substring in content:
                    return pattern["ruleName"], pattern["reminder"]
    return None, None


def extract_content_from_input(tool_name, tool_input):
    """Extract content to check from tool input based on tool type.

    Only the NEW text is scanned (Write content, Edit/MultiEdit new_string),
    so pre-existing occurrences in the file do not retrigger warnings.
    """
    if tool_name == "Write":
        return tool_input.get("content", "")
    elif tool_name == "Edit":
        return tool_input.get("new_string", "")
    elif tool_name == "MultiEdit":
        edits = tool_input.get("edits", [])
        if edits:
            return " ".join(edit.get("new_string", "") for edit in edits)
        return ""
    return ""


def main():
    """Main hook function."""
    # Check if security reminders are enabled
    security_reminder_enabled = os.environ.get("ENABLE_SECURITY_REMINDER", "1")

    # Only run if security reminders are enabled
    if security_reminder_enabled == "0":
        sys.exit(0)

    # Periodically clean up old state files (10% chance per run)
    if random.random() < 0.1:
        cleanup_old_state_files()

    # Read input from stdin
    try:
        raw_input = sys.stdin.read()
        input_data = json.loads(raw_input)
    except json.JSONDecodeError as e:
        debug_log(f"JSON decode error: {e}")
        sys.exit(0)  # Allow tool to proceed if we can't parse input

    # Extract session ID and tool information from the hook input
    session_id = input_data.get("session_id", "default")
    tool_name = input_data.get("tool_name", "")
    tool_input = input_data.get("tool_input", {})

    # Check if this is a relevant tool
    if tool_name not in ["Edit", "Write", "MultiEdit"]:
        sys.exit(0)  # Allow non-file tools to proceed

    # Extract file path from tool_input
    file_path = tool_input.get("file_path", "")
    if not file_path:
        sys.exit(0)  # Allow if no file path

    # Extract content to check
    content = extract_content_from_input(tool_name, tool_input)

    # Check for security patterns
    rule_name, reminder = check_patterns(file_path, content)

    if rule_name and reminder:
        # Create unique warning key
        warning_key = f"{file_path}-{rule_name}"

        # Load existing warnings for this session
        shown_warnings = load_state(session_id)

        # Check if we've already
shown this warning in this session
        if warning_key not in shown_warnings:
            # Add to shown warnings and save
            shown_warnings.add(warning_key)
            save_state(session_id, shown_warnings)

            # Output the warning to stderr and block execution
            print(reminder, file=sys.stderr)
            sys.exit(2)  # Block tool execution (exit code 2 for PreToolUse hooks)

    # Allow tool to proceed
    sys.exit(0)


if __name__ == "__main__":
    main()



================================================
FILE: scripts/auto-close-duplicates.ts
================================================
#!/usr/bin/env bun

// Minimal typing for process so the script runs standalone under Bun
// without pulling in @types/node.
// NOTE(review): generic type parameters (e.g. Record<string, string>,
// Promise<T>) appear to have been dropped by extraction throughout this
// file - verify against the original source.
declare global {
  var process: {
    env: Record;
  };
}

interface GitHubIssue {
  number: number;
  title: string;
  user: { id: number };
  created_at: string;
}

interface GitHubComment {
  id: number;
  body: string;
  created_at: string;
  user: { type: string; id: number };
}

interface GitHubReaction {
  user: { id: number };
  content: string;
}

// Thin wrapper over the GitHub REST API: sets auth/accept headers, serializes
// an optional JSON body, and throws on any non-2xx response.
async function githubRequest(endpoint: string, token: string, method: string = 'GET', body?: any): Promise {
  const response = await fetch(`https://api.github.com${endpoint}`, {
    method,
    headers: {
      Authorization: `Bearer ${token}`,
      Accept: "application/vnd.github.v3+json",
      "User-Agent": "auto-close-duplicates-script",
      ...(body && { "Content-Type": "application/json" }),
    },
    ...(body && { body: JSON.stringify(body) }),
  });

  if (!response.ok) {
    throw new Error(
      `GitHub API request failed: ${response.status} ${response.statusText}`
    );
  }

  return response.json();
}

// Pull the referenced issue number out of a duplicate-detection comment.
// Accepts either "#123" or a full issue URL; returns null when neither is found.
function extractDuplicateIssueNumber(commentBody: string): number | null {
  // Try to match #123 format first
  let match = commentBody.match(/#(\d+)/);
  if (match) {
    return parseInt(match[1], 10);
  }

  // Try to match GitHub issue URL format: https://github.com/owner/repo/issues/123
  match = commentBody.match(/github\.com\/[^\/]+\/[^\/]+\/issues\/(\d+)/);
  if (match) {
    return parseInt(match[1], 10);
  }

  return null;
}

// Close an issue as a duplicate of another and leave an explanatory comment.
async function closeIssueAsDuplicate(
  owner: string,
  repo: string,
  issueNumber: number,
  duplicateOfNumber: number,
  token: string
): Promise {
await githubRequest( `/repos/${owner}/${repo}/issues/${issueNumber}`, token, 'PATCH', { state: 'closed', state_reason: 'duplicate', labels: ['duplicate'] } ); await githubRequest( `/repos/${owner}/${repo}/issues/${issueNumber}/comments`, token, 'POST', { body: `This issue has been automatically closed as a duplicate of #${duplicateOfNumber}. If this is incorrect, please re-open this issue or create a new one. 🤖 Generated with [Claude Code](https://claude.ai/code)` } ); } async function autoCloseDuplicates(): Promise { console.log("[DEBUG] Starting auto-close duplicates script"); const token = process.env.GITHUB_TOKEN; if (!token) { throw new Error("GITHUB_TOKEN environment variable is required"); } console.log("[DEBUG] GitHub token found"); const owner = process.env.GITHUB_REPOSITORY_OWNER || "anthropics"; const repo = process.env.GITHUB_REPOSITORY_NAME || "claude-code"; console.log(`[DEBUG] Repository: ${owner}/${repo}`); const threeDaysAgo = new Date(); threeDaysAgo.setDate(threeDaysAgo.getDate() - 3); console.log( `[DEBUG] Checking for duplicate comments older than: ${threeDaysAgo.toISOString()}` ); console.log("[DEBUG] Fetching open issues created more than 3 days ago..."); const allIssues: GitHubIssue[] = []; let page = 1; const perPage = 100; while (true) { const pageIssues: GitHubIssue[] = await githubRequest( `/repos/${owner}/${repo}/issues?state=open&per_page=${perPage}&page=${page}`, token ); if (pageIssues.length === 0) break; // Filter for issues created more than 3 days ago const oldEnoughIssues = pageIssues.filter(issue => new Date(issue.created_at) <= threeDaysAgo ); allIssues.push(...oldEnoughIssues); page++; // Safety limit to avoid infinite loops if (page > 20) break; } const issues = allIssues; console.log(`[DEBUG] Found ${issues.length} open issues`); let processedCount = 0; let candidateCount = 0; for (const issue of issues) { processedCount++; console.log( `[DEBUG] Processing issue #${issue.number} (${processedCount}/${issues.length}): 
${issue.title}` ); console.log(`[DEBUG] Fetching comments for issue #${issue.number}...`); const comments: GitHubComment[] = await githubRequest( `/repos/${owner}/${repo}/issues/${issue.number}/comments`, token ); console.log( `[DEBUG] Issue #${issue.number} has ${comments.length} comments` ); const dupeComments = comments.filter( (comment) => comment.body.includes("Found") && comment.body.includes("possible duplicate") && comment.user.type === "Bot" ); console.log( `[DEBUG] Issue #${issue.number} has ${dupeComments.length} duplicate detection comments` ); if (dupeComments.length === 0) { console.log( `[DEBUG] Issue #${issue.number} - no duplicate comments found, skipping` ); continue; } const lastDupeComment = dupeComments[dupeComments.length - 1]; const dupeCommentDate = new Date(lastDupeComment.created_at); console.log( `[DEBUG] Issue #${ issue.number } - most recent duplicate comment from: ${dupeCommentDate.toISOString()}` ); if (dupeCommentDate > threeDaysAgo) { console.log( `[DEBUG] Issue #${issue.number} - duplicate comment is too recent, skipping` ); continue; } console.log( `[DEBUG] Issue #${ issue.number } - duplicate comment is old enough (${Math.floor( (Date.now() - dupeCommentDate.getTime()) / (1000 * 60 * 60 * 24) )} days)` ); const commentsAfterDupe = comments.filter( (comment) => new Date(comment.created_at) > dupeCommentDate ); console.log( `[DEBUG] Issue #${issue.number} - ${commentsAfterDupe.length} comments after duplicate detection` ); if (commentsAfterDupe.length > 0) { console.log( `[DEBUG] Issue #${issue.number} - has activity after duplicate comment, skipping` ); continue; } console.log( `[DEBUG] Issue #${issue.number} - checking reactions on duplicate comment...` ); const reactions: GitHubReaction[] = await githubRequest( `/repos/${owner}/${repo}/issues/comments/${lastDupeComment.id}/reactions`, token ); console.log( `[DEBUG] Issue #${issue.number} - duplicate comment has ${reactions.length} reactions` ); const authorThumbsDown = 
reactions.some( (reaction) => reaction.user.id === issue.user.id && reaction.content === "-1" ); console.log( `[DEBUG] Issue #${issue.number} - author thumbs down reaction: ${authorThumbsDown}` ); if (authorThumbsDown) { console.log( `[DEBUG] Issue #${issue.number} - author disagreed with duplicate detection, skipping` ); continue; } const duplicateIssueNumber = extractDuplicateIssueNumber(lastDupeComment.body); if (!duplicateIssueNumber) { console.log( `[DEBUG] Issue #${issue.number} - could not extract duplicate issue number from comment, skipping` ); continue; } candidateCount++; const issueUrl = `https://github.com/${owner}/${repo}/issues/${issue.number}`; try { console.log( `[INFO] Auto-closing issue #${issue.number} as duplicate of #${duplicateIssueNumber}: ${issueUrl}` ); await closeIssueAsDuplicate(owner, repo, issue.number, duplicateIssueNumber, token); console.log( `[SUCCESS] Successfully closed issue #${issue.number} as duplicate of #${duplicateIssueNumber}` ); } catch (error) { console.error( `[ERROR] Failed to close issue #${issue.number} as duplicate: ${error}` ); } } console.log( `[DEBUG] Script completed. 
Processed ${processedCount} issues, found ${candidateCount} candidates for auto-close` ); } autoCloseDuplicates().catch(console.error); // Make it a module export {}; ================================================ FILE: scripts/backfill-duplicate-comments.ts ================================================ #!/usr/bin/env bun declare global { var process: { env: Record; }; } interface GitHubIssue { number: number; title: string; state: string; state_reason?: string; user: { id: number }; created_at: string; closed_at?: string; } interface GitHubComment { id: number; body: string; created_at: string; user: { type: string; id: number }; } async function githubRequest(endpoint: string, token: string, method: string = 'GET', body?: any): Promise { const response = await fetch(`https://api.github.com${endpoint}`, { method, headers: { Authorization: `Bearer ${token}`, Accept: "application/vnd.github.v3+json", "User-Agent": "backfill-duplicate-comments-script", ...(body && { "Content-Type": "application/json" }), }, ...(body && { body: JSON.stringify(body) }), }); if (!response.ok) { throw new Error( `GitHub API request failed: ${response.status} ${response.statusText}` ); } return response.json(); } async function triggerDedupeWorkflow( owner: string, repo: string, issueNumber: number, token: string, dryRun: boolean = true ): Promise { if (dryRun) { console.log(`[DRY RUN] Would trigger dedupe workflow for issue #${issueNumber}`); return; } await githubRequest( `/repos/${owner}/${repo}/actions/workflows/claude-dedupe-issues.yml/dispatches`, token, 'POST', { ref: 'main', inputs: { issue_number: issueNumber.toString() } } ); } async function backfillDuplicateComments(): Promise { console.log("[DEBUG] Starting backfill duplicate comments script"); const token = process.env.GITHUB_TOKEN; if (!token) { throw new Error(`GITHUB_TOKEN environment variable is required Usage: GITHUB_TOKEN=your_token bun run scripts/backfill-duplicate-comments.ts Environment Variables: GITHUB_TOKEN 
- GitHub personal access token with repo and actions permissions (required) DRY_RUN - Set to "false" to actually trigger workflows (default: true for safety) MAX_ISSUE_NUMBER - Only process issues with numbers less than this value (default: 4050)`); } console.log("[DEBUG] GitHub token found"); const owner = "anthropics"; const repo = "claude-code"; const dryRun = process.env.DRY_RUN !== "false"; const maxIssueNumber = parseInt(process.env.MAX_ISSUE_NUMBER || "4050", 10); const minIssueNumber = parseInt(process.env.MIN_ISSUE_NUMBER || "1", 10); console.log(`[DEBUG] Repository: ${owner}/${repo}`); console.log(`[DEBUG] Dry run mode: ${dryRun}`); console.log(`[DEBUG] Looking at issues between #${minIssueNumber} and #${maxIssueNumber}`); console.log(`[DEBUG] Fetching issues between #${minIssueNumber} and #${maxIssueNumber}...`); const allIssues: GitHubIssue[] = []; let page = 1; const perPage = 100; while (true) { const pageIssues: GitHubIssue[] = await githubRequest( `/repos/${owner}/${repo}/issues?state=all&per_page=${perPage}&page=${page}&sort=created&direction=desc`, token ); if (pageIssues.length === 0) break; // Filter to only include issues within the specified range const filteredIssues = pageIssues.filter(issue => issue.number >= minIssueNumber && issue.number < maxIssueNumber ); allIssues.push(...filteredIssues); // If the oldest issue in this page is still above our minimum, we need to continue // but if the oldest issue is below our minimum, we can stop const oldestIssueInPage = pageIssues[pageIssues.length - 1]; if (oldestIssueInPage && oldestIssueInPage.number >= maxIssueNumber) { console.log(`[DEBUG] Oldest issue in page #${page} is #${oldestIssueInPage.number}, continuing...`); } else if (oldestIssueInPage && oldestIssueInPage.number < minIssueNumber) { console.log(`[DEBUG] Oldest issue in page #${page} is #${oldestIssueInPage.number}, below minimum, stopping`); break; } else if (filteredIssues.length === 0 && pageIssues.length > 0) { 
console.log(`[DEBUG] No issues in page #${page} are in range #${minIssueNumber}-#${maxIssueNumber}, continuing...`); } page++; // Safety limit to avoid infinite loops if (page > 200) { console.log("[DEBUG] Reached page limit, stopping pagination"); break; } } console.log(`[DEBUG] Found ${allIssues.length} issues between #${minIssueNumber} and #${maxIssueNumber}`); let processedCount = 0; let candidateCount = 0; let triggeredCount = 0; for (const issue of allIssues) { processedCount++; console.log( `[DEBUG] Processing issue #${issue.number} (${processedCount}/${allIssues.length}): ${issue.title}` ); console.log(`[DEBUG] Fetching comments for issue #${issue.number}...`); const comments: GitHubComment[] = await githubRequest( `/repos/${owner}/${repo}/issues/${issue.number}/comments`, token ); console.log( `[DEBUG] Issue #${issue.number} has ${comments.length} comments` ); // Look for existing duplicate detection comments (from the dedupe bot) const dupeDetectionComments = comments.filter( (comment) => comment.body.includes("Found") && comment.body.includes("possible duplicate") && comment.user.type === "Bot" ); console.log( `[DEBUG] Issue #${issue.number} has ${dupeDetectionComments.length} duplicate detection comments` ); // Skip if there's already a duplicate detection comment if (dupeDetectionComments.length > 0) { console.log( `[DEBUG] Issue #${issue.number} already has duplicate detection comment, skipping` ); continue; } candidateCount++; const issueUrl = `https://github.com/${owner}/${repo}/issues/${issue.number}`; try { console.log( `[INFO] ${dryRun ? 
'[DRY RUN] ' : ''}Triggering dedupe workflow for issue #${issue.number}: ${issueUrl}` ); await triggerDedupeWorkflow(owner, repo, issue.number, token, dryRun); if (!dryRun) { console.log( `[SUCCESS] Successfully triggered dedupe workflow for issue #${issue.number}` ); } triggeredCount++; } catch (error) { console.error( `[ERROR] Failed to trigger workflow for issue #${issue.number}: ${error}` ); } // Add a delay between workflow triggers to avoid overwhelming the system await new Promise(resolve => setTimeout(resolve, 1000)); } console.log( `[DEBUG] Script completed. Processed ${processedCount} issues, found ${candidateCount} candidates without duplicate comments, ${dryRun ? 'would trigger' : 'triggered'} ${triggeredCount} workflows` ); } backfillDuplicateComments().catch(console.error); // Make it a module export {}; ================================================ FILE: scripts/comment-on-duplicates.sh ================================================ #!/usr/bin/env bash # # Comments on a GitHub issue with a list of potential duplicates. # Usage: ./comment-on-duplicates.sh --base-issue 123 --potential-duplicates 456 789 101 # set -euo pipefail REPO="anthropics/claude-code" BASE_ISSUE="" DUPLICATES=() # Parse arguments while [[ $# -gt 0 ]]; do case $1 in --base-issue) BASE_ISSUE="$2" shift 2 ;; --potential-duplicates) shift while [[ $# -gt 0 && ! "$1" =~ ^-- ]]; do DUPLICATES+=("$1") shift done ;; *) echo "Unknown option: $1" >&2 exit 1 ;; esac done # Validate base issue if [[ -z "$BASE_ISSUE" ]]; then echo "Error: --base-issue is required" >&2 exit 1 fi if ! 
[[ "$BASE_ISSUE" =~ ^[0-9]+$ ]]; then
  echo "Error: --base-issue must be a number, got: $BASE_ISSUE" >&2
  exit 1
fi

# Validate duplicates
if [[ ${#DUPLICATES[@]} -eq 0 ]]; then
  echo "Error: --potential-duplicates requires at least one issue number" >&2
  exit 1
fi
if [[ ${#DUPLICATES[@]} -gt 3 ]]; then
  echo "Error: --potential-duplicates accepts at most 3 issues" >&2
  exit 1
fi
for dup in "${DUPLICATES[@]}"; do
  if ! [[ "$dup" =~ ^[0-9]+$ ]]; then
    echo "Error: duplicate issue must be a number, got: $dup" >&2
    exit 1
  fi
done

# Validate that base issue exists
if ! gh issue view "$BASE_ISSUE" --repo "$REPO" &>/dev/null; then
  echo "Error: issue #$BASE_ISSUE does not exist in $REPO" >&2
  exit 1
fi

# Validate that all duplicate issues exist
for dup in "${DUPLICATES[@]}"; do
  if ! gh issue view "$dup" --repo "$REPO" &>/dev/null; then
    echo "Error: issue #$dup does not exist in $REPO" >&2
    exit 1
  fi
done

# Build comment body
COUNT=${#DUPLICATES[@]}
if [[ $COUNT -eq 1 ]]; then
  HEADER="Found 1 possible duplicate issue:"
else
  HEADER="Found $COUNT possible duplicate issues:"
fi
BODY="$HEADER"$'\n\n'
INDEX=1
for dup in "${DUPLICATES[@]}"; do
  BODY+="$INDEX. https://github.com/$REPO/issues/$dup"$'\n'
  # Use plain assignment rather than ((INDEX++)): under `set -e`,
  # ((expr)) exits the script whenever the expression evaluates to 0.
  INDEX=$((INDEX + 1))
done
BODY+=$'\n'"This issue will be automatically closed as a duplicate in 3 days."$'\n\n'
BODY+="- If your issue is a duplicate, please close it and 👍 the existing issue instead"$'\n'
BODY+="- To prevent auto-closure, add a comment or 👎 this comment"$'\n\n'
BODY+="🤖 Generated with [Claude Code](https://claude.ai/code)"

# Post the comment
gh issue comment "$BASE_ISSUE" --repo "$REPO" --body "$BODY"
echo "Posted duplicate comment on issue #$BASE_ISSUE"


================================================
FILE: scripts/edit-issue-labels.sh
================================================
#!/usr/bin/env bash
#
# Edits labels on a GitHub issue.
# Usage: ./edit-issue-labels.sh --issue 123 --add-label bug --add-label needs-triage --remove-label untriaged # set -euo pipefail ISSUE="" ADD_LABELS=() REMOVE_LABELS=() # Parse arguments while [[ $# -gt 0 ]]; do case $1 in --issue) ISSUE="$2" shift 2 ;; --add-label) ADD_LABELS+=("$2") shift 2 ;; --remove-label) REMOVE_LABELS+=("$2") shift 2 ;; *) exit 1 ;; esac done # Validate issue number if [[ -z "$ISSUE" ]]; then exit 1 fi if ! [[ "$ISSUE" =~ ^[0-9]+$ ]]; then exit 1 fi if [[ ${#ADD_LABELS[@]} -eq 0 && ${#REMOVE_LABELS[@]} -eq 0 ]]; then exit 1 fi # Fetch valid labels from the repo VALID_LABELS=$(gh label list --limit 500 --json name --jq '.[].name') # Filter to only labels that exist in the repo FILTERED_ADD=() for label in "${ADD_LABELS[@]}"; do if echo "$VALID_LABELS" | grep -qxF "$label"; then FILTERED_ADD+=("$label") fi done FILTERED_REMOVE=() for label in "${REMOVE_LABELS[@]}"; do if echo "$VALID_LABELS" | grep -qxF "$label"; then FILTERED_REMOVE+=("$label") fi done if [[ ${#FILTERED_ADD[@]} -eq 0 && ${#FILTERED_REMOVE[@]} -eq 0 ]]; then exit 0 fi # Build gh command arguments GH_ARGS=("issue" "edit" "$ISSUE") for label in "${FILTERED_ADD[@]}"; do GH_ARGS+=("--add-label" "$label") done for label in "${FILTERED_REMOVE[@]}"; do GH_ARGS+=("--remove-label" "$label") done gh "${GH_ARGS[@]}" if [[ ${#FILTERED_ADD[@]} -gt 0 ]]; then echo "Added: ${FILTERED_ADD[*]}" fi if [[ ${#FILTERED_REMOVE[@]} -gt 0 ]]; then echo "Removed: ${FILTERED_REMOVE[*]}" fi ================================================ FILE: scripts/gh.sh ================================================ #!/usr/bin/env bash set -euo pipefail # Wrapper around gh CLI that only allows specific subcommands and flags. # All commands are scoped to the current repository via GH_REPO or GITHUB_REPOSITORY. 
# # Usage: # ./scripts/gh.sh issue view 123 # ./scripts/gh.sh issue view 123 --comments # ./scripts/gh.sh issue list --state open --limit 20 # ./scripts/gh.sh search issues "search query" --limit 10 # ./scripts/gh.sh label list --limit 100 export GH_HOST=github.com REPO="${GH_REPO:-${GITHUB_REPOSITORY:-}}" if [[ -z "$REPO" || "$REPO" == */*/* || "$REPO" != */* ]]; then echo "Error: GH_REPO or GITHUB_REPOSITORY must be set to owner/repo format (e.g., GITHUB_REPOSITORY=anthropics/claude-code)" >&2 exit 1 fi export GH_REPO="$REPO" ALLOWED_FLAGS=(--comments --state --limit --label) FLAGS_WITH_VALUES=(--state --limit --label) SUB1="${1:-}" SUB2="${2:-}" CMD="$SUB1 $SUB2" case "$CMD" in "issue view"|"issue list"|"search issues"|"label list") ;; *) echo "Error: only 'issue view', 'issue list', 'search issues', 'label list' are allowed (e.g., ./scripts/gh.sh issue view 123)" >&2 exit 1 ;; esac shift 2 # Separate flags from positional arguments POSITIONAL=() FLAGS=() skip_next=false for arg in "$@"; do if [[ "$skip_next" == true ]]; then FLAGS+=("$arg") skip_next=false elif [[ "$arg" == -* ]]; then flag="${arg%%=*}" matched=false for allowed in "${ALLOWED_FLAGS[@]}"; do if [[ "$flag" == "$allowed" ]]; then matched=true break fi done if [[ "$matched" == false ]]; then echo "Error: only --comments, --state, --limit, --label flags are allowed (e.g., ./scripts/gh.sh issue list --state open --limit 20)" >&2 exit 1 fi FLAGS+=("$arg") # If flag expects a value and isn't using = syntax, skip next arg if [[ "$arg" != *=* ]]; then for vflag in "${FLAGS_WITH_VALUES[@]}"; do if [[ "$flag" == "$vflag" ]]; then skip_next=true break fi done fi else POSITIONAL+=("$arg") fi done if [[ "$CMD" == "search issues" ]]; then QUERY="${POSITIONAL[0]:-}" QUERY_LOWER=$(echo "$QUERY" | tr '[:upper:]' '[:lower:]') if [[ "$QUERY_LOWER" == *"repo:"* || "$QUERY_LOWER" == *"org:"* || "$QUERY_LOWER" == *"user:"* ]]; then echo "Error: search query must not contain repo:, org:, or user: qualifiers (e.g., 
./scripts/gh.sh search issues \"bug report\" --limit 10)" >&2 exit 1 fi gh "$SUB1" "$SUB2" "$QUERY" --repo "$REPO" "${FLAGS[@]}" elif [[ "$CMD" == "issue view" ]]; then if [[ ${#POSITIONAL[@]} -ne 1 ]] || ! [[ "${POSITIONAL[0]}" =~ ^[0-9]+$ ]]; then echo "Error: issue view requires exactly one numeric issue number (e.g., ./scripts/gh.sh issue view 123)" >&2 exit 1 fi gh "$SUB1" "$SUB2" "${POSITIONAL[0]}" "${FLAGS[@]}" else if [[ ${#POSITIONAL[@]} -ne 0 ]]; then echo "Error: issue list and label list do not accept positional arguments (e.g., ./scripts/gh.sh issue list --state open, ./scripts/gh.sh label list --limit 100)" >&2 exit 1 fi gh "$SUB1" "$SUB2" "${FLAGS[@]}" fi ================================================ FILE: scripts/issue-lifecycle.ts ================================================ // Single source of truth for issue lifecycle labels, timeouts, and messages. export const lifecycle = [ { label: "invalid", days: 3, reason: "this doesn't appear to be about Claude Code", nudge: "This doesn't appear to be about [Claude Code](https://github.com/anthropics/claude-code). For general Anthropic support, visit [support.anthropic.com](https://support.anthropic.com).", }, { label: "needs-repro", days: 7, reason: "we still need reproduction steps to investigate", nudge: "We weren't able to reproduce this. Could you provide steps to trigger the issue — what you ran, what happened, and what you expected?", }, { label: "needs-info", days: 7, reason: "we still need a bit more information to move forward", nudge: "We need more information to continue investigating. 
Can you make sure to include your Claude Code version (`claude --version`), OS, and any error messages or logs?", }, { label: "stale", days: 14, reason: "inactive for too long", nudge: "This issue has been automatically marked as stale due to inactivity.", }, { label: "autoclose", days: 14, reason: "inactive for too long", nudge: "This issue has been marked for automatic closure.", }, ] as const; export type LifecycleLabel = (typeof lifecycle)[number]["label"]; export const STALE_UPVOTE_THRESHOLD = 10; ================================================ FILE: scripts/lifecycle-comment.ts ================================================ #!/usr/bin/env bun // Posts a comment when a lifecycle label is applied to an issue, // giving the author a heads-up and a chance to respond before auto-close. import { lifecycle } from "./issue-lifecycle.ts"; const DRY_RUN = process.argv.includes("--dry-run"); const token = process.env.GITHUB_TOKEN; const repo = process.env.GITHUB_REPOSITORY; // owner/repo const label = process.env.LABEL; const issueNumber = process.env.ISSUE_NUMBER; if (!DRY_RUN && !token) throw new Error("GITHUB_TOKEN required"); if (!repo) throw new Error("GITHUB_REPOSITORY required"); if (!label) throw new Error("LABEL required"); if (!issueNumber) throw new Error("ISSUE_NUMBER required"); const entry = lifecycle.find((l) => l.label === label); if (!entry) { console.log(`No lifecycle entry for label "${label}", skipping`); process.exit(0); } const body = `${entry.nudge} This issue will be closed automatically if there's no activity within ${entry.days} days.`; // -- if (DRY_RUN) { console.log(`Would comment on #${issueNumber} for label "${label}":\n\n${body}`); process.exit(0); } const response = await fetch( `https://api.github.com/repos/${repo}/issues/${issueNumber}/comments`, { method: "POST", headers: { Authorization: `Bearer ${token}`, Accept: "application/vnd.github.v3+json", "Content-Type": "application/json", "User-Agent": "lifecycle-comment", }, body: 
JSON.stringify({ body }), } ); if (!response.ok) { const text = await response.text(); throw new Error(`GitHub API ${response.status}: ${text}`); } console.log(`Commented on #${issueNumber} for label "${label}"`); ================================================ FILE: scripts/sweep.ts ================================================ #!/usr/bin/env bun import { lifecycle, STALE_UPVOTE_THRESHOLD } from "./issue-lifecycle.ts"; // -- const NEW_ISSUE = "https://github.com/anthropics/claude-code/issues/new/choose"; const DRY_RUN = process.argv.includes("--dry-run"); const CLOSE_MESSAGE = (reason: string) => `Closing for now — ${reason}. Please [open a new issue](${NEW_ISSUE}) if this is still relevant.`; // -- async function githubRequest( endpoint: string, method = "GET", body?: unknown ): Promise { const token = process.env.GITHUB_TOKEN; if (!token) throw new Error("GITHUB_TOKEN required"); const response = await fetch(`https://api.github.com${endpoint}`, { method, headers: { Authorization: `Bearer ${token}`, Accept: "application/vnd.github.v3+json", "User-Agent": "sweep", ...(body && { "Content-Type": "application/json" }), }, ...(body && { body: JSON.stringify(body) }), }); if (!response.ok) { if (response.status === 404) return {} as T; const text = await response.text(); throw new Error(`GitHub API ${response.status}: ${text}`); } return response.json(); } // -- async function markStale(owner: string, repo: string) { const staleDays = lifecycle.find((l) => l.label === "stale")!.days; const cutoff = new Date(); cutoff.setDate(cutoff.getDate() - staleDays); let labeled = 0; console.log(`\n=== marking stale (${staleDays}d inactive) ===`); for (let page = 1; page <= 10; page++) { const issues = await githubRequest( `/repos/${owner}/${repo}/issues?state=open&sort=updated&direction=asc&per_page=100&page=${page}` ); if (issues.length === 0) break; for (const issue of issues) { if (issue.pull_request) continue; if (issue.locked) continue; if (issue.assignees?.length > 0) 
continue; const updatedAt = new Date(issue.updated_at); if (updatedAt > cutoff) return labeled; const alreadyStale = issue.labels?.some( (l: any) => l.name === "stale" || l.name === "autoclose" ); if (alreadyStale) continue; const thumbsUp = issue.reactions?.["+1"] ?? 0; if (thumbsUp >= STALE_UPVOTE_THRESHOLD) continue; const base = `/repos/${owner}/${repo}/issues/${issue.number}`; if (DRY_RUN) { const age = Math.floor((Date.now() - updatedAt.getTime()) / 86400000); console.log(`#${issue.number}: would label stale (${age}d inactive) — ${issue.title}`); } else { await githubRequest(`${base}/labels`, "POST", { labels: ["stale"] }); console.log(`#${issue.number}: labeled stale — ${issue.title}`); } labeled++; } } return labeled; } async function closeExpired(owner: string, repo: string) { let closed = 0; for (const { label, days, reason } of lifecycle) { const cutoff = new Date(); cutoff.setDate(cutoff.getDate() - days); console.log(`\n=== ${label} (${days}d timeout) ===`); for (let page = 1; page <= 10; page++) { const issues = await githubRequest( `/repos/${owner}/${repo}/issues?state=open&labels=${label}&sort=updated&direction=asc&per_page=100&page=${page}` ); if (issues.length === 0) break; for (const issue of issues) { if (issue.pull_request) continue; if (issue.locked) continue; const thumbsUp = issue.reactions?.["+1"] ?? 0; if (thumbsUp >= STALE_UPVOTE_THRESHOLD) continue; const base = `/repos/${owner}/${repo}/issues/${issue.number}`; const events = await githubRequest(`${base}/events?per_page=100`); const labeledAt = events .filter((e) => e.event === "labeled" && e.label?.name === label) .map((e) => new Date(e.created_at)) .pop(); if (!labeledAt || labeledAt > cutoff) continue; // Skip if a non-bot user commented after the label was applied. // The triage workflow should remove lifecycle labels on human // activity, but check here too as a safety net. 
const comments = await githubRequest( `${base}/comments?since=${labeledAt.toISOString()}&per_page=100` ); const hasHumanComment = comments.some( (c) => c.user && c.user.type !== "Bot" ); if (hasHumanComment) { console.log( `#${issue.number}: skipping (human activity after ${label} label)` ); continue; } if (DRY_RUN) { const age = Math.floor((Date.now() - labeledAt.getTime()) / 86400000); console.log(`#${issue.number}: would close (${label}, ${age}d old) — ${issue.title}`); } else { await githubRequest(`${base}/comments`, "POST", { body: CLOSE_MESSAGE(reason) }); await githubRequest(base, "PATCH", { state: "closed", state_reason: "not_planned" }); console.log(`#${issue.number}: closed (${label})`); } closed++; } } } return closed; } // -- const owner = process.env.GITHUB_REPOSITORY_OWNER; const repo = process.env.GITHUB_REPOSITORY_NAME; if (!owner || !repo) throw new Error("GITHUB_REPOSITORY_OWNER and GITHUB_REPOSITORY_NAME required"); if (DRY_RUN) console.log("DRY RUN — no changes will be made\n"); const labeled = await markStale(owner, repo); const closed = await closeExpired(owner, repo); console.log(`\nDone: ${labeled} ${DRY_RUN ? "would be labeled" : "labeled"} stale, ${closed} ${DRY_RUN ? "would be closed" : "closed"}`);