Repository: elianiva/dotfiles Branch: master Commit: 40499271695e Files: 307 Total size: 917.2 KB Directory structure: gitextract_76rbdv8a/ ├── .gitignore ├── .gitmodules ├── Makefile ├── README.md ├── agents/ │ ├── AGENTS.md │ ├── opencode/ │ │ └── opencode.json │ ├── pi/ │ │ ├── extensions/ │ │ │ ├── agent-profiles.ts │ │ │ ├── ask.ts │ │ │ ├── composed-editor/ │ │ │ │ ├── index.ts │ │ │ │ └── package.json │ │ │ ├── custom-formatting.ts │ │ │ ├── exit-command.ts │ │ │ ├── handoff.ts │ │ │ ├── lsp/ │ │ │ │ ├── index.ts │ │ │ │ └── lsp-config.ts │ │ │ ├── notify.ts │ │ │ ├── pi-codemode/ │ │ │ │ ├── README.md │ │ │ │ ├── executor.jsonc │ │ │ │ ├── index.ts │ │ │ │ ├── package.json │ │ │ │ ├── src/ │ │ │ │ │ ├── builtins.ts │ │ │ │ │ ├── codemode.ts │ │ │ │ │ ├── executor-cache.ts │ │ │ │ │ ├── fff-plugin.ts │ │ │ │ │ ├── fff.ts │ │ │ │ │ ├── jj-plugin.ts │ │ │ │ │ ├── npm-plugin.ts │ │ │ │ │ ├── pi-plugin.ts │ │ │ │ │ ├── read.ts │ │ │ │ │ ├── render.ts │ │ │ │ │ ├── runtime.ts │ │ │ │ │ ├── sandbox-cache.ts │ │ │ │ │ ├── source-config.ts │ │ │ │ │ ├── source-hydrate.ts │ │ │ │ │ ├── trace.ts │ │ │ │ │ ├── turndown.d.ts │ │ │ │ │ ├── types.ts │ │ │ │ │ ├── util.ts │ │ │ │ │ └── webfetch.ts │ │ │ │ └── tsconfig.json │ │ │ ├── prompt-timer.ts │ │ │ ├── session-breakdown.ts │ │ │ ├── webfetch.ts │ │ │ └── whimsical-timer.ts │ │ ├── mcp.json │ │ ├── models.json │ │ ├── package.json │ │ ├── settings.json │ │ ├── themes/ │ │ │ └── rose-pine-dawn.json │ │ └── treesitter.ts │ └── skills/ │ ├── build-feature/ │ │ └── SKILL.md │ ├── code-review/ │ │ └── SKILL.md │ ├── debugging/ │ │ └── SKILL.md │ ├── deslop/ │ │ └── SKILL.md │ ├── effect-best-practices/ │ │ ├── SKILL.md │ │ └── references/ │ │ ├── anti-patterns.md │ │ ├── effect-atom-patterns.md │ │ ├── error-patterns.md │ │ ├── layer-patterns.md │ │ ├── observability-patterns.md │ │ ├── rpc-cluster-patterns.md │ │ ├── schema-patterns.md │ │ └── service-patterns.md │ ├── emil-design-eng/ │ │ └── SKILL.md │ ├── 
frontend-design/ │ │ ├── LICENSE.txt │ │ └── SKILL.md │ ├── grill-me/ │ │ └── SKILL.md │ ├── initz/ │ │ └── SKILL.md │ ├── jj/ │ │ └── SKILL.md │ ├── pi-improvements/ │ │ └── SKILL.md │ ├── react-best-practices/ │ │ ├── AGENTS.md │ │ ├── README.md │ │ ├── SKILL.md │ │ ├── metadata.json │ │ └── rules/ │ │ ├── _sections.md │ │ ├── _template.md │ │ ├── advanced-event-handler-refs.md │ │ ├── advanced-use-latest.md │ │ ├── async-api-routes.md │ │ ├── async-defer-await.md │ │ ├── async-dependencies.md │ │ ├── async-parallel.md │ │ ├── async-suspense-boundaries.md │ │ ├── bundle-barrel-imports.md │ │ ├── bundle-conditional.md │ │ ├── bundle-defer-third-party.md │ │ ├── bundle-dynamic-imports.md │ │ ├── bundle-preload.md │ │ ├── client-event-listeners.md │ │ ├── client-localstorage-schema.md │ │ ├── client-passive-event-listeners.md │ │ ├── client-swr-dedup.md │ │ ├── js-batch-dom-css.md │ │ ├── js-cache-function-results.md │ │ ├── js-cache-property-access.md │ │ ├── js-cache-storage.md │ │ ├── js-combine-iterations.md │ │ ├── js-early-exit.md │ │ ├── js-hoist-regexp.md │ │ ├── js-index-maps.md │ │ ├── js-length-check-first.md │ │ ├── js-min-max-loop.md │ │ ├── js-set-map-lookups.md │ │ ├── js-tosorted-immutable.md │ │ ├── rendering-activity.md │ │ ├── rendering-animate-svg-wrapper.md │ │ ├── rendering-conditional-render.md │ │ ├── rendering-content-visibility.md │ │ ├── rendering-hoist-jsx.md │ │ ├── rendering-hydration-no-flicker.md │ │ ├── rendering-svg-precision.md │ │ ├── rendering-usetransition-loading.md │ │ ├── rerender-defer-reads.md │ │ ├── rerender-dependencies.md │ │ ├── rerender-derived-state.md │ │ ├── rerender-functional-setstate.md │ │ ├── rerender-lazy-state-init.md │ │ ├── rerender-memo-with-default-value.md │ │ ├── rerender-memo.md │ │ ├── rerender-simple-expression-in-memo.md │ │ ├── rerender-transitions.md │ │ ├── server-after-nonblocking.md │ │ ├── server-auth-actions.md │ │ ├── server-cache-lru.md │ │ ├── server-cache-react.md │ │ ├── 
server-dedup-props.md │ │ ├── server-parallel-fetching.md │ │ └── server-serialization.md │ ├── root-cause-tracing/ │ │ └── SKILL.md │ ├── session-retrospective/ │ │ └── SKILL.md │ ├── web-design-guidelines/ │ │ └── SKILL.md │ └── wydt/ │ └── SKILL.md ├── direnv/ │ └── direnvrc ├── fish/ │ ├── conf.d/ │ │ ├── abbr.fish │ │ ├── alias.fish │ │ ├── colour.fish │ │ ├── foreign.fish │ │ ├── manpage.fish │ │ └── vi-mode.fish │ ├── config.fish │ ├── fish_plugins │ ├── fish_variables │ └── functions/ │ ├── fenv.apply.fish │ ├── fenv.fish │ ├── fenv.main.fish │ ├── fenv.parse.after.fish │ ├── fenv.parse.before.fish │ ├── fenv.parse.diff.fish │ └── fenv.parse.divider.fish ├── flake.nix ├── ghostty/ │ └── config ├── gitconfig/ │ └── .gitconfig ├── helix/ │ ├── config.toml │ ├── languages.toml │ ├── runtime/ │ │ └── queries/ │ │ └── typst/ │ │ └── highlights.scm │ └── themes/ │ └── my_rose_pine.toml ├── improvement.md ├── jjui/ │ ├── config.toml │ └── themes/ │ └── rose-pine-dawn.toml ├── kitty/ │ ├── kitty.conf │ ├── launch.conf │ ├── rose-pine-dawn.conf │ └── rose-pine.conf ├── legacy/ │ ├── awesome/ │ │ └── .config/ │ │ └── awesome/ │ │ ├── keybinds/ │ │ │ ├── bindtotags.lua │ │ │ ├── clientbuttons.lua │ │ │ ├── clientkeys.lua │ │ │ ├── globalkeys.lua │ │ │ └── mediakeys.lua │ │ ├── main/ │ │ │ ├── autostart.lua │ │ │ ├── error-handling.lua │ │ │ ├── exitscreen.lua │ │ │ ├── helpers.lua │ │ │ ├── json.lua │ │ │ ├── layouts.lua │ │ │ ├── menu.lua │ │ │ ├── rules.lua │ │ │ ├── signals.lua │ │ │ ├── tags.lua │ │ │ ├── titlebar.lua │ │ │ ├── variables.lua │ │ │ └── volume-widget/ │ │ │ └── init.lua │ │ ├── rc.lua │ │ ├── statusbar/ │ │ │ ├── init.lua │ │ │ └── modules/ │ │ │ ├── battery/ │ │ │ │ └── init.lua │ │ │ ├── clock/ │ │ │ │ └── init.lua │ │ │ ├── cpu/ │ │ │ │ └── init.lua │ │ │ ├── launcher.lua │ │ │ ├── memory/ │ │ │ │ └── init.lua │ │ │ ├── netspeed/ │ │ │ │ └── init.lua │ │ │ ├── systray.lua │ │ │ ├── taglist.lua │ │ │ ├── temp/ │ │ │ │ └── init.lua │ │ │ ├── todo/ 
│ │ │ │ └── init.lua │ │ │ └── volume/ │ │ │ └── init.lua │ │ └── themes/ │ │ └── main/ │ │ ├── colours.lua │ │ ├── elements.lua │ │ ├── naughty.lua │ │ └── theme.lua │ ├── fcitx5/ │ │ └── .config/ │ │ └── fcitx5/ │ │ ├── conf/ │ │ │ ├── cached_layouts │ │ │ ├── classicui.conf │ │ │ ├── clipboard.conf │ │ │ ├── imselector.conf │ │ │ ├── keyboard-longpress.conf │ │ │ ├── keyboard.conf │ │ │ ├── mozc.conf │ │ │ ├── notifications.conf │ │ │ ├── xcb.conf │ │ │ └── xim.conf │ │ ├── config │ │ ├── profile │ │ └── profile_KrsNBN │ ├── flameshot/ │ │ └── .config/ │ │ └── flameshot/ │ │ └── flameshot.ini │ ├── kitty/ │ │ └── .config/ │ │ └── kitty/ │ │ ├── gitgud-dark.conf │ │ ├── gruvy.conf │ │ ├── icy.conf │ │ ├── kitty.conf │ │ └── visual-studio-dark.conf │ ├── lf/ │ │ └── .config/ │ │ └── lf/ │ │ ├── lfrc │ │ └── preview │ ├── pages/ │ │ ├── chrome-page/ │ │ │ ├── index.html │ │ │ ├── main.js │ │ │ ├── manifest.json │ │ │ └── style.css │ │ ├── ryuko/ │ │ │ └── slim.theme │ │ ├── startpage/ │ │ │ ├── index.html │ │ │ ├── main.js │ │ │ ├── manifest.json │ │ │ └── style.css │ │ └── startpage-v2/ │ │ ├── index.html │ │ ├── main.js │ │ ├── manifest.json │ │ ├── startpage.xpi │ │ ├── style.css │ │ └── test.vue │ ├── scripts/ │ │ └── .scripts/ │ │ ├── extract │ │ ├── launcher │ │ └── lf │ ├── tmux/ │ │ └── .tmux.conf │ └── vscode/ │ └── .config/ │ └── Code/ │ └── User/ │ ├── keybindings.json │ └── settings.json ├── misc/ │ ├── .bashrc │ └── .profile ├── modules/ │ ├── brews.nix │ ├── casks.nix │ ├── darwin-config.nix │ ├── darwin-home.nix │ ├── darwin-packages.nix │ ├── git.nix │ ├── gpg.nix │ ├── home-common.nix │ ├── linux-home.nix │ ├── linux-packages.nix │ ├── linux-terminals.nix │ └── packages.nix ├── nushell/ │ ├── ai.nu │ ├── alias.nu │ ├── bash-env.nu │ ├── config.nu │ ├── rose-pine-dawn.nu │ ├── rose-pine-moon.nu │ ├── vendor/ │ │ └── autoload/ │ │ └── starship.nu │ └── zoxide.nu ├── nvim/ │ ├── init.lua │ ├── lazy-lock.json │ ├── lua/ │ │ ├── config/ │ │ │ ├── 
autocmds.lua │ │ │ ├── lazy.lua │ │ │ ├── lsp.lua │ │ │ ├── mappings.lua │ │ │ ├── options.lua │ │ │ └── utils.lua │ │ └── plugins/ │ │ ├── align.lua │ │ ├── cmp.lua │ │ ├── colorscheme.lua │ │ ├── conform.lua │ │ ├── dropbar.lua │ │ ├── fff.lua │ │ ├── flash.lua │ │ ├── flutter-tools.off.lua │ │ ├── gitsigns.lua │ │ ├── lua-ls.lua │ │ ├── markdown.lua │ │ ├── mason-lsp.lua │ │ ├── mason.lua │ │ ├── mini-ai.lua │ │ ├── mini-surround.lua │ │ ├── neo-tree.lua │ │ ├── nvim-ts-autotag.lua │ │ ├── obsidian.lua │ │ ├── pairs.lua │ │ ├── qf.lua │ │ ├── snacks.lua │ │ ├── statuscolumn.lua │ │ ├── supermaven.lua │ │ ├── treesitter.lua │ │ ├── ts-comments.lua │ │ ├── typescript.lua │ │ ├── typst.lua │ │ ├── ufo.lua │ │ └── which-key.lua │ └── queries/ │ └── blade/ │ ├── highlights.scm │ └── injections.scm ├── wezterm/ │ └── wezterm.lua ├── yazi/ │ └── yazi.toml └── zellij/ ├── config.kdl ├── layouts/ │ └── plain.kdl └── themes/ ├── rose-pine-dawn.kdl └── rose-pine.kdl ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitignore ================================================ result config/fish/fish_variables __pycache__ nushell/history* bin node_modules ================================================ FILE: .gitmodules ================================================ [submodule "dotbot"] path = dotbot url = https://github.com/anishathalye/dotbot ignore = dirty ================================================ FILE: Makefile ================================================ linux: nh home switch --flake .#elianiva --print-build-logs darwin: nh darwin switch . clean: nh clean ================================================ FILE: README.md ================================================ # elianiva's personal dotfiles This is my personal dotfiles repository which has gone through many iterations. 
Here are some of the screenshots from time to time (the config may or may not be still here, idk): ![arch](./screenshots/preview-arch.png) ![arch-new](./screenshots/preview-arch-new.png) ![old](./screenshots/preview-old.png) ![plasma](./screenshots/preview-plasma.png) ![fedora](./screenshots/preview-fedora.png) ![fedora-2](https://github.com/user-attachments/assets/ce263afd-1986-4c55-93a8-10494302c464) ![macos](https://github.com/user-attachments/assets/e6a1be22-b773-4e0c-b022-eedc70fa8ca8) ================================================ FILE: agents/AGENTS.md ================================================ # Important Rules - Be extremely concise. Sacrifice grammar for the sake of concision. - Don't add tests for what the type system already guarantees. - At the end of each plan, give me a list of unresolved questions to answer, if any. The last thing visible should be numbered list of questions or concrete steps. - No compatibility wrappers, no legacy shims, no temporary plumbing, no backward compats, no reexport for "convenience". - Treat the project as greenfield unless stated otherwise. - You are not alone, expect parallel changes in unrelated files by others. 
================================================ FILE: agents/opencode/opencode.json ================================================ { "$schema": "https://opencode.ai/config.json", "agent": { "general": { "disable": true }, "explore": { "disable": true }, "plan": { "permission": { "edit": { "*": "deny", "*.md": "ask" }, "skill": { "*": "deny", "code-review-*": "allow", "brainstorming": "allow", "build-feature": "allow", "*-best-practices": "allow", } } } }, "permission": { "*": "deny", "read": "allow", "write": "allow", "bash": "allow", "webfetch": "allow", "websearch": "allow", "lsp": "allow", "list": "allow", "grep": "allow", "glob": "allow", "edit": "allow", "external_directory": { "*": "ask", "~/Development/personal/*": "allow", "~/Development/work/*": "allow", "~/Repositories/*": "allow", "/tmp/*": "allow", }, "question": "allow", "codesearch": "allow", "skill": { "*": "allow", "pdf": "deny", "xlsx": "deny", }, } } ================================================ FILE: agents/pi/extensions/agent-profiles.ts ================================================ /** * Agent Profiles Extension * * Define agent personas in markdown files with frontmatter. * Content is appended to system prompt each turn. * * Location: * - ~/.pi/agent/agents/*.md (global) * - .pi/agents/*.md (project-local, overrides) * * Format: * ```markdown * --- * name: my-agent * description: What this agent does * tools: [read, bash, edit, write] # whitelist (omit for all) * disabledTools: [grep] # blacklist (omit for none) * --- * * Your system prompt additions here... 
* ``` * * Usage: * - `pi --agent plan` - start with plan agent * - `/agent` - show selector * - `/agent plan` - switch to plan agent * - `/agent default` - reset to default */ import { existsSync, readFileSync, readdirSync } from "node:fs"; import { homedir } from "node:os"; import { join, basename, extname } from "node:path"; import type { ExtensionAPI, ExtensionContext, } from "@mariozechner/pi-coding-agent"; import { DynamicBorder } from "@mariozechner/pi-coding-agent"; import { Key, Container, type SelectItem, SelectList, Text, } from "@mariozechner/pi-tui"; interface AgentProfile { name: string; description: string; tools?: string[]; disabledTools?: string[]; provider?: string; content: string; // appended to system prompt } const DEFAULT_TOOLS = ["read", "bash", "edit", "write", "grep", "find", "ls", "mcp", "ask"]; // Hardcoded default agent - always available as fallback const DEFAULT_AGENT: AgentProfile = { name: "default", description: "Default agent with full capabilities", tools: DEFAULT_TOOLS, content: "You are pi, a minimal terminal coding agent. 
Use the tools available to help the user.", }; /** * Parse frontmatter from markdown content */ function parseFrontmatter(content: string): { frontmatter: Record; body: string; } { const match = content.match(/^---\s*\n([\s\S]*?)\n---\s*\n([\s\S]*)$/); if (!match) { return { frontmatter: {}, body: content }; } const yamlText = match[1]; const body = match[2].trim(); const frontmatter: Record = {}; for (const line of yamlText.split("\n")) { const colonIdx = line.indexOf(":"); if (colonIdx === -1) continue; const key = line.slice(0, colonIdx).trim(); let value: unknown = line.slice(colonIdx + 1).trim(); // Parse arrays: [item1, item2] if ( typeof value === "string" && value.startsWith("[") && value.endsWith("]") ) { value = value .slice(1, -1) .split(",") .map((s) => s.trim()) .filter(Boolean); } frontmatter[key] = value; } return { frontmatter, body }; } /** * Load agent profiles from directory */ function loadAgentsFromDir(dir: string): Map { const agents = new Map(); if (!existsSync(dir)) return agents; const entries = readdirSync(dir, { withFileTypes: true }); for (const entry of entries) { if (!entry.isFile()) continue; if (extname(entry.name) !== ".md") continue; const filepath = join(dir, entry.name); try { const content = readFileSync(filepath, "utf-8"); const { frontmatter, body } = parseFrontmatter(content); const name = (frontmatter.name as string) || basename(entry.name, ".md"); agents.set(name, { name, description: (frontmatter.description as string) || "", tools: frontmatter.tools as string[] | undefined, disabledTools: frontmatter.disabledTools as string[] | undefined, provider: frontmatter.provider as string | undefined, content: body, }); } catch { // skip invalid files } } return agents; } /** * Load all agents (global + project, project overrides) * Default agent is always available as fallback */ function loadAgents(cwd: string): Map { const globalDir = join(homedir(), ".pi", "agent", "agents"); const projectDir = join(cwd, ".pi", "agents"); // 
Start with default agent const agents = new Map(); agents.set("default", DEFAULT_AGENT); // Load global agents (can override default if explicitly defined) const globalAgents = loadAgentsFromDir(globalDir); for (const [name, agent] of globalAgents) { agents.set(name, agent); } // Load project agents (override global) const projectAgents = loadAgentsFromDir(projectDir); for (const [name, agent] of projectAgents) { agents.set(name, agent); } return agents; } /** * Get effective tools list for an agent */ function getEffectiveTools(agent: AgentProfile | undefined): string[] { if (!agent) return DEFAULT_TOOLS; // Explicit whitelist if (agent.tools) { return agent.tools.filter((t) => DEFAULT_TOOLS.includes(t)); } // Blacklist mode if (agent.disabledTools) { return DEFAULT_TOOLS.filter((t) => !agent.disabledTools?.includes(t)); } return DEFAULT_TOOLS; } export default function agentProfilesExtension(pi: ExtensionAPI) { let agents = new Map(); let activeAgent: AgentProfile | undefined; let baseSystemPrompt = ""; // captured at session start // Register --agent CLI flag pi.registerFlag("agent", { description: "Agent profile to use", type: "string", }); /** * Apply an agent profile */ async function applyAgent( name: string, agent: AgentProfile | undefined, ctx: ExtensionContext, ): Promise { activeAgent = agent; // Emit event for other extensions (e.g., starship-prompt) pi.events.emit("agent-profile:changed", { name: agent?.name ?? "default", description: agent?.description ?? 
"", }); if (!agent) { // Reset to default pi.setActiveTools(DEFAULT_TOOLS); ctx.ui.notify("Reset to default agent", "info"); updateStatus(ctx); return; } // Apply tools - this rebuilds the system prompt with correct tool descriptions const tools = getEffectiveTools(agent); pi.setActiveTools(tools); ctx.ui.notify(`Agent "${name}" activated (${tools.length} tools)`, "info"); updateStatus(ctx); } /** * Build description for UI */ function buildDescription(agent: AgentProfile): string { const parts: string[] = []; const tools = getEffectiveTools(agent); if (tools.length !== DEFAULT_TOOLS.length) { parts.push(`${tools.length} tools`); } if (agent.description) { const truncated = agent.description.length > 40 ? `${agent.description.slice(0, 37)}...` : agent.description; parts.push(truncated); } return parts.join(" | ") || "Custom agent profile"; } /** * Show agent selector UI */ async function showAgentSelector(ctx: ExtensionContext): Promise { const agentList = Array.from(agents.values()).sort((a, b) => a.name.localeCompare(b.name), ); if (agentList.length === 0) { ctx.ui.notify( "No agent profiles found. Create files in ~/.pi/agent/agents/ or .pi/agents/", "warning", ); return; } const items: SelectItem[] = [ { value: "default", label: "default", description: "Reset to default (all tools, no additions)", }, ...agentList.map((agent) => ({ value: agent.name, label: agent.name === activeAgent?.name ? 
`${agent.name} (active)` : agent.name, description: buildDescription(agent), })), ]; const result = await ctx.ui.custom( (tui, theme, _kb, done) => { const container = new Container(); container.addChild(new DynamicBorder((str) => theme.fg("accent", str))); container.addChild( new Text(theme.fg("accent", theme.bold("Select Agent Profile"))), ); const selectList = new SelectList(items, Math.min(items.length, 12), { selectedPrefix: (text) => theme.fg("accent", text), selectedText: (text) => theme.fg("accent", text), description: (text) => theme.fg("muted", text), scrollInfo: (text) => theme.fg("dim", text), noMatch: (text) => theme.fg("warning", text), }); selectList.onSelect = (item) => done(item.value); selectList.onCancel = () => done(null); const k = (s: string) => theme.bold(theme.fg("accent", s)); const l = (s: string) => theme.fg("dim", s); container.addChild(selectList); container.addChild( new Text( ` ${k("↑↓")}${l(" select")} · ${k("enter")}${l(" submit")} · ${k("esc")}${l(" dismiss")}`, ), ); container.addChild(new DynamicBorder((str) => theme.fg("accent", str))); return { render(width: number) { return container.render(width); }, invalidate() { container.invalidate(); }, handleInput(data: string) { selectList.handleInput(data); tui.requestRender(); }, }; }, ); if (!result) return; if (result === "default") { await applyAgent("default", undefined, ctx); } else { const agent = agents.get(result); if (agent) { await applyAgent(result, agent, ctx); } } } /** * Update status in footer */ function updateStatus(ctx: ExtensionContext) { if (activeAgent && activeAgent.name !== "default") { ctx.ui.setStatus( "agent", ctx.ui.theme.fg("accent", `agent:${activeAgent.name}`), ); } else { ctx.ui.setStatus("agent", undefined); } } /** * Get ordered list of agent names for cycling */ function getAgentOrder(): string[] { return [ "default", ...Array.from(agents.keys()) .filter((n) => n !== "default") .sort(), ]; } /** * Cycle to next agent */ async function cycleAgent(ctx: 
ExtensionContext): Promise { const agentNames = getAgentOrder(); if (agentNames.length <= 1) { ctx.ui.notify("No custom agents to cycle", "warning"); return; } const currentName = activeAgent?.name ?? "default"; const currentIndex = agentNames.indexOf(currentName); const nextIndex = currentIndex === -1 ? 0 : (currentIndex + 1) % agentNames.length; const nextName = agentNames[nextIndex]; if (nextName === "default") { await applyAgent("default", undefined, ctx); } else { const agent = agents.get(nextName); if (agent) { await applyAgent(nextName, agent, ctx); } } } // Register keyboard shortcut to cycle agents pi.registerShortcut(Key.ctrl(";"), { description: "Cycle to next agent profile", handler: (ctx) => cycleAgent(ctx), }); // Inject agent content into system prompt each turn // This appends to the base system prompt (which was already rebuilt with correct tools) pi.on("before_agent_start", async (event) => { if (activeAgent?.content) { return { systemPrompt: `${event.systemPrompt}\n\n${activeAgent.content}`, }; } }); // Initialize on session start pi.on("session_start", async (_event, ctx) => { // Load agents (includes hardcoded default) agents = loadAgents(ctx.cwd); // Set default as initial active agent activeAgent = DEFAULT_AGENT; // Capture base system prompt (for reference, not used directly) baseSystemPrompt = ctx.getSystemPrompt(); // Emit initial state immediately so other extensions (e.g., starship-prompt) have valid state pi.events.emit("agent-profile:changed", { name: DEFAULT_AGENT.name, description: DEFAULT_AGENT.description, }); // Check CLI flag first const agentFlag = pi.getFlag("agent"); if (typeof agentFlag === "string" && agentFlag) { if (agentFlag === "default") { // Already default, just notify ctx.ui.notify("Using default agent", "info"); } else { const agent = agents.get(agentFlag); if (agent) { await applyAgent(agentFlag, agent, ctx); } else { const available = ["default", ...Array.from(agents.keys())].join( ", ", ); ctx.ui.notify( `Unknown 
agent "${agentFlag}". Available: ${available}`, "warning", ); } } updateStatus(ctx); return; } // Restore from session state const entries = ctx.sessionManager.getEntries(); const agentEntry = entries .filter( (e: { type: string; customType?: string }) => e.type === "custom" && e.customType === "agent-profile", ) .pop() as { data?: { name: string } } | undefined; if (agentEntry?.data?.name && agentEntry.data.name !== "default") { const agent = agents.get(agentEntry.data.name); if (agent) { await applyAgent(agentEntry.data.name, agent, ctx); } } updateStatus(ctx); }); // Persist active agent when it changes pi.on("agent-profile:changed", async (event) => { pi.appendEntry("agent-profile", { name: event.name }); }); // Register /agent command pi.registerCommand("agent", { description: "Switch agent profile", handler: async (args, ctx) => { const name = args?.trim(); if (!name) { await showAgentSelector(ctx); return; } if (name === "default") { await applyAgent("default", undefined, ctx); return; } const agent = agents.get(name); if (!agent) { const available = ["default", ...Array.from(agents.keys())].join(", "); ctx.ui.notify( `Unknown agent "${name}". Available: ${available}`, "error", ); return; } await applyAgent(name, agent, ctx); }, }); } ================================================ FILE: agents/pi/extensions/ask.ts ================================================ /** * Ask Tool Extension * * Provides an interactive Q&A tool for the LLM to ask users questions * with multiple choice options or free-form text input. 
*/ import type { ExtensionAPI, ToolResult } from "@mariozechner/pi-coding-agent"; import { Key, matchesKey, truncateToWidth } from "@mariozechner/pi-tui"; import { Type } from "@sinclair/typebox"; // Ask tool option schema const AskOptionSchema = Type.Object({ label: Type.String({ description: "Display label for the option" }), description: Type.Optional( Type.String({ description: "Optional description shown below label" }), ), }); const AskQuestionSchema = Type.Object({ question: Type.String({ description: "The question to ask the user" }), options: Type.Array(AskOptionSchema, { description: "Options for the user to choose from", }), shortTitle: Type.Optional( Type.String({ description: "Short 1-2 word title for tab display (defaults to number)", }), ), }); // Single object with everything optional to avoid union issues const AskParams = Type.Object({ questions: Type.Optional( Type.Array(AskQuestionSchema, { description: "A list of questions to ask the user", }), ), question: Type.Optional( Type.String({ description: "The question to ask the user" }), ), options: Type.Optional( Type.Array(AskOptionSchema, { description: "Options for the user to choose from", }), ), }); interface AskDetails { question: string; options: string[]; answer: string | null; wasCustom?: boolean; } interface MultiAskDetails { results: AskDetails[]; } interface QuestionState { optionIndex: number; editMode: boolean; customAnswer: string; answer: string | null; wasCustom: boolean; } export default function askToolExtension(pi: ExtensionAPI): void { pi.registerTool({ name: "ask", label: "Ask", description: "Ask the user one or more questions and let them pick from options or type a custom answer. 
Use when you need user input to proceed with a decision.", parameters: AskParams, async execute(_toolCallId, params, _signal, _onUpdate, ctx) { const questions: { question: string; options: { label: string; description?: string }[]; shortTitle?: string; }[] = []; if (params.questions && params.questions.length > 0) { questions.push(...params.questions); } else if (params.question && params.options) { questions.push({ question: params.question, options: params.options }); } if (questions.length === 0) { return { content: [ { type: "text", text: "Error: No questions (or question/options) provided", }, ], }; } if (!ctx.hasUI) { return { content: [ { type: "text", text: "Error: UI not available (running in non-interactive mode)", }, ], details: { results: questions.map((q) => ({ question: q.question, options: q.options.map((o) => o.label), answer: null, })), } as MultiAskDetails, }; } // Single question: use simple flow (no tabs, no review) if (questions.length === 1) { return handleSingleQuestion(ctx, questions[0]); } // Multiple questions: use tabbed UI with review return handleMultipleQuestions(ctx, questions); }, }); } async function handleSingleQuestion( ctx: ExtensionAPI["ctx"], q: { question: string; options: { label: string; description?: string }[]; shortTitle?: string; }, ): Promise { const allOptions = [ ...q.options, { label: "Type your own answer", description: "Write a custom response" }, ]; const result = await ctx.ui.custom<{ answer: string; wasCustom: boolean; index?: number; } | null>((tui, theme, _kb, done) => { let optionIndex = 0; let editMode = false; let customAnswer = ""; let cachedLines: string[] | undefined; function refresh() { cachedLines = undefined; tui.requestRender(); } function handleInput(data: string) { if (editMode) { if (matchesKey(data, Key.escape)) { editMode = false; customAnswer = ""; refresh(); return; } if (matchesKey(data, Key.enter)) { const trimmed = customAnswer.trim(); if (trimmed) { done({ answer: trimmed, wasCustom: 
true }); } else { editMode = false; customAnswer = ""; refresh(); } return; } if (matchesKey(data, Key.backspace)) { customAnswer = customAnswer.slice(0, -1); refresh(); return; } if (data.length === 1 && data.charCodeAt(0) >= 32) { customAnswer += data; refresh(); } return; } if (matchesKey(data, Key.up)) { optionIndex = Math.max(0, optionIndex - 1); refresh(); return; } if (matchesKey(data, Key.down)) { optionIndex = Math.min(allOptions.length - 1, optionIndex + 1); refresh(); return; } if (matchesKey(data, Key.enter)) { const selected = allOptions[optionIndex]; const isLastOption = optionIndex === allOptions.length - 1; if (isLastOption) { editMode = true; refresh(); } else { done({ answer: selected.label, wasCustom: false, index: optionIndex + 1, }); } return; } if (matchesKey(data, Key.escape)) { done(null); } } function render(width: number): string[] { if (cachedLines) return cachedLines; const lines: string[] = []; const add = (s: string) => lines.push(truncateToWidth(s, width)); // Top border add(theme.fg("accent", "─".repeat(width))); // Question add(theme.fg("text", ` ${q.question}`)); lines.push(""); // Options for (let i = 0; i < allOptions.length; i++) { const opt = allOptions[i]; const selected = i === optionIndex; const isLast = i === allOptions.length - 1; const prefix = selected ? 
theme.fg("accent", "> ") : " "; const num = `${i + 1}.`; if (isLast && editMode) { add(prefix + theme.fg("accent", `${num} ${opt.label} ✎`)); } else if (selected) { add(prefix + theme.fg("accent", `${num} ${opt.label}`)); } else { add(` ${theme.fg("text", `${num} ${opt.label}`)}`); } if (opt.description) { add(` ${theme.fg("muted", opt.description)}`); } } if (editMode) { lines.push(""); add(theme.fg("muted", " Your answer:")); const inputLine = ` > ${customAnswer}_`; add(truncateToWidth(inputLine, width)); } // Help text lines.push(""); const k = (s: string) => theme.bold(theme.fg("accent", s)); const l = (s: string) => theme.fg("dim", s); if (editMode) { add(` ${k("enter")}${l(" · submit")} · ${k("esc")}${l(" go back")}`); } else { add( ` ${k("↑↓")}${l(" select")} · ${k("enter")}${l(" submit")} · ${k("esc")}${l(" dismiss")}`, ); } // Bottom border add(theme.fg("accent", "─".repeat(width))); cachedLines = lines; return lines; } return { render, invalidate: () => { cachedLines = undefined; }, handleInput, }; }); if (!result) { return { content: [{ type: "text", text: "User cancelled the question" }], details: { results: [ { question: q.question, options: q.options.map((o) => o.label), answer: null, }, ], } as MultiAskDetails, }; } return { content: [ { type: "text", text: result.wasCustom ? 
`User answered: ${result.answer}` : `User selected: ${result.answer}`, }, ], details: { results: [ { question: q.question, options: q.options.map((o) => o.label), answer: result.answer, wasCustom: result.wasCustom, }, ], } as MultiAskDetails, }; } async function handleMultipleQuestions( ctx: ExtensionAPI["ctx"], questions: { question: string; options: { label: string; description?: string }[]; shortTitle?: string; }[], ): Promise { // Initialize state for each question const states: QuestionState[] = questions.map(() => ({ optionIndex: 0, editMode: false, customAnswer: "", answer: null as string | null, wasCustom: false, })); let currentQuestion = 0; let inReview = false; let reviewOptionIndex = 0; // 0 = go back, 1 = submit const result = await ctx.ui.custom<{ results: { answer: string; wasCustom: boolean }[]; } | null>((tui, theme, _kb, done) => { let cachedLines: string[] | undefined; function refresh() { cachedLines = undefined; tui.requestRender(); } function getCurrentOptions() { const q = questions[currentQuestion]; return [ ...q.options, { label: "Type your own answer", description: "Write a custom response", }, ]; } function handleInput(data: string) { if (inReview) { handleReviewInput(data); return; } const state = states[currentQuestion]; const allOptions = getCurrentOptions(); if (state.editMode) { if (matchesKey(data, Key.escape)) { state.editMode = false; state.customAnswer = ""; refresh(); return; } if (matchesKey(data, Key.enter)) { const trimmed = state.customAnswer.trim(); if (trimmed) { state.answer = trimmed; state.wasCustom = true; state.editMode = false; // Auto-advance to next question or review if (currentQuestion < questions.length - 1) { currentQuestion++; } else { inReview = true; } refresh(); } else { state.editMode = false; state.customAnswer = ""; refresh(); } return; } if (matchesKey(data, Key.backspace)) { state.customAnswer = state.customAnswer.slice(0, -1); refresh(); return; } if (data.length === 1 && data.charCodeAt(0) >= 32) { 
state.customAnswer += data; refresh(); } return; } // Tab navigation between questions if (matchesKey(data, Key.tab)) { currentQuestion = (currentQuestion + 1) % questions.length; refresh(); return; } if (matchesKey(data, Key.shift(Key.tab))) { currentQuestion = (currentQuestion - 1 + questions.length) % questions.length; refresh(); return; } // Option selection within current question if (matchesKey(data, Key.up)) { state.optionIndex = Math.max(0, state.optionIndex - 1); refresh(); return; } if (matchesKey(data, Key.down)) { state.optionIndex = Math.min( allOptions.length - 1, state.optionIndex + 1, ); refresh(); return; } if (matchesKey(data, Key.enter)) { const selected = allOptions[state.optionIndex]; const isLastOption = state.optionIndex === allOptions.length - 1; if (isLastOption) { state.editMode = true; refresh(); } else { state.answer = selected.label; state.wasCustom = false; // Auto-advance to next question or review if (currentQuestion < questions.length - 1) { currentQuestion++; } else { inReview = true; } refresh(); } return; } if (matchesKey(data, Key.escape)) { done(null); } } function handleReviewInput(data: string) { if (matchesKey(data, Key.left) || matchesKey(data, Key.up)) { reviewOptionIndex = 0; refresh(); return; } if (matchesKey(data, Key.right) || matchesKey(data, Key.down)) { reviewOptionIndex = 1; refresh(); return; } if (matchesKey(data, Key.enter)) { if (reviewOptionIndex === 0) { // Go back to edit inReview = false; currentQuestion = 0; refresh(); } else { // Submit done({ results: states.map((s) => ({ answer: s.answer!, wasCustom: s.wasCustom, })), }); } return; } if (matchesKey(data, Key.escape)) { inReview = false; refresh(); } } function render(width: number): string[] { if (cachedLines) return cachedLines; if (inReview) { return renderReview(width); } return renderQuestion(width); } function renderQuestion(width: number): string[] { const lines: string[] = []; const add = (s: string) => lines.push(truncateToWidth(s, width)); 
const state = states[currentQuestion]; const q = questions[currentQuestion]; const allOptions = getCurrentOptions(); // Tab bar with short titles (fallback to numbers) const tabs = questions.map((q, i) => { const isActive = i === currentQuestion; const isAnswered = states[i].answer !== null; const label = q.shortTitle || `${i + 1}`; if (isActive) { return theme.fg("accent", theme.bold(label)); } if (isAnswered) { return theme.fg("muted", `${label}✓`); } return theme.fg("dim", label); }); add(tabs.join(" · ")); // Top border add(theme.fg("accent", "─".repeat(width))); // Question add(theme.fg("text", ` ${q.question}`)); lines.push(""); // Options for (let i = 0; i < allOptions.length; i++) { const opt = allOptions[i]; const selected = i === state.optionIndex; const isLast = i === allOptions.length - 1; const prefix = selected ? theme.fg("accent", "> ") : " "; const num = `${i + 1}.`; const isActive = !isLast && state.answer === opt.label; const check = isActive ? ` ${theme.fg("accent", "✓")}` : ""; if (isLast && state.editMode) { add(prefix + theme.fg("accent", `${num} ${opt.label} ✎`)); } else if (isLast && state.wasCustom && state.answer) { add( prefix + (selected ? 
theme.fg("accent", `${num} ${opt.label}`) : theme.fg("text", `${num} ${opt.label}`)) + ` ${theme.fg("accent", "✓")}`, ); } else if (selected) { add(prefix + theme.fg("accent", `${num} ${opt.label}`) + check); } else { add(` ${theme.fg("text", `${num} ${opt.label}`)}` + check); } if (opt.description) { add(` ${theme.fg("muted", opt.description)}`); } } if (state.editMode) { lines.push(""); add(theme.fg("muted", " Your answer:")); const inputLine = ` > ${state.customAnswer}_`; add(truncateToWidth(inputLine, width)); } // Help text lines.push(""); const k = (s: string) => theme.bold(theme.fg("accent", s)); const l = (s: string) => theme.fg("dim", s); if (state.editMode) { add(` ${k("enter")}${l(" · submit")} · ${k("esc")}${l(" go back")}`); } else { add( ` ${k("↑↓")}${l(" select")} · ${k("tab")}${l(" next")} · ${k("enter")}${l(" submit")} · ${k("esc")}${l(" dismiss")}`, ); } // Bottom border add(theme.fg("accent", "─".repeat(width))); cachedLines = lines; return lines; } function renderReview(width: number): string[] { const lines: string[] = []; const add = (s: string) => lines.push(truncateToWidth(s, width)); // Header add(theme.fg("accent", theme.bold(" Review your answers"))); add(theme.fg("accent", "─".repeat(width))); lines.push(""); // Questions and answers for (let i = 0; i < questions.length; i++) { const q = questions[i]; const state = states[i]; const label = q.shortTitle || `Q${i + 1}`; add( ` ${theme.fg("text", theme.bold(`${label}:`))} ${theme.fg("text", q.question)}`, ); if (state.answer) { const prefix = state.wasCustom ? "✎" : "✓"; add( ` ${theme.fg("accent", prefix)} ${theme.fg("text", state.answer)}`, ); } else { add(` ${theme.fg("dim", "○ No answer")}`); } lines.push(""); } // Action buttons const goBackLabel = "← Go back to edit"; const submitLabel = "✔️Submit answers"; const goBack = reviewOptionIndex === 0 ? theme.fg("accent", theme.bold(`> ${goBackLabel}`)) : theme.fg("dim", ` ${goBackLabel}`); const submit = reviewOptionIndex === 1 ? 
theme.fg("accent", theme.bold(`> ${submitLabel}`)) : theme.fg("dim", ` ${submitLabel}`); add(` ${goBack}`); add(` ${submit}`); // Help text lines.push(""); const k = (s: string) => theme.bold(theme.fg("accent", s)); const l = (s: string) => theme.fg("dim", s); add( ` ${k("↑↓")}${l(" select")} · ${k("enter")}${l(" confirm")} · ${k("esc")}${l(" go back")}`, ); // Bottom border add(theme.fg("accent", "─".repeat(width))); cachedLines = lines; return lines; } return { render, invalidate: () => { cachedLines = undefined; }, handleInput, }; }); if (!result) { return { content: [{ type: "text", text: "User cancelled the questions" }], details: { results: questions.map((q, i) => ({ question: q.question, options: q.options.map((o) => o.label), answer: states[i].answer, })), } as MultiAskDetails, }; } // Format output const outputText = questions .map((q, i) => { const r = result.results[i]; const prefix = r.wasCustom ? "Answered" : "Selected"; const label = q.shortTitle || `Question ${i + 1}`; return `${label}: ${q.question}\n${prefix}: ${r.answer}`; }) .join("\n\n"); return { content: [{ type: "text", text: outputText }], details: { results: questions.map((q, i) => ({ question: q.question, options: q.options.map((o) => o.label), answer: result.results[i].answer, wasCustom: result.results[i].wasCustom, })), } as MultiAskDetails, }; } ================================================ FILE: agents/pi/extensions/composed-editor/index.ts ================================================ /** * Composed Editor - Combines starship-prompt + pi-ckers * * This is your local extension that composes multiple editor features. 
* Place this in ~/.pi/extensions/composed-editor/ */ import { type ExtensionAPI, CustomEditor } from "@mariozechner/pi-coding-agent"; import type { Picker } from "@elianiva/pi-ckers"; import { createStarshipWidget, setupStarshipEvents } from "@elianiva/pi-starship"; import { withPickers } from "@elianiva/pi-ckers"; import { filePicker, dirPicker } from "@elianiva/pi-ckers/builtin/fff"; import { grepPicker } from "@elianiva/pi-ckers/builtin/grep"; // Compose: StarshipEditor + all builtin pickers const pickers: Picker[] = [ filePicker(), dirPicker(), grepPicker(), ]; const ComposedEditor = withPickers(CustomEditor, pickers); export default function composedEditorExtension(pi: ExtensionAPI) { pi.on("session_start", async (_event, ctx) => { // if (!ctx.hasUI) return; // // // Set composed editor - pass ctx as 5th argument for picker initialization // ctx.ui.setEditorComponent( // (tui, theme, keybindings) => new ComposedEditor(tui, theme, keybindings, undefined, ctx), // ); // Clear footer // ctx.ui.setFooter(() => ({ invalidate() {}, render() { return []; } })); // // Set up starship widget // createStarshipWidget(pi, ctx); }); // Set up starship event handlers // setupStarshipEvents(pi); } ================================================ FILE: agents/pi/extensions/composed-editor/package.json ================================================ { "name": "composed-editor", "version": "1.0.0", "private": true, "description": "Composed editor with starship-prompt + pi-ckers", "dependencies": { "@elianiva/pi-ckers": "link:@elianiva/pi-ckers", "@elianiva/pi-starship": "link:@elianiva/pi-starship", "@mariozechner/pi-coding-agent": "*" } } ================================================ FILE: agents/pi/extensions/custom-formatting.ts ================================================ import { Theme } from "@mariozechner/pi-coding-agent"; import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; /** * Extension that overrides markdown bold and italic formatting to use 
colors instead. * * It monkey-patches the Theme prototype so it works across all themes * and doesn't require polling. */ export default function (pi: ExtensionAPI) { const originalBold = Theme.prototype.bold; const originalItalic = Theme.prototype.italic; // Override bold to use 'warning' color (usually yellow/orange) Theme.prototype.bold = function(this: Theme, text: string) { return originalBold(this.fg("mdHeading", text)); }; // Override italic to use 'accent' color (usually teal/cyan) Theme.prototype.italic = function(this: Theme, text: string) { return originalItalic(this.fg("accent", text)); }; // Restore originals on shutdown pi.on("session_shutdown", () => { Theme.prototype.bold = originalBold; Theme.prototype.italic = originalItalic; }); } ================================================ FILE: agents/pi/extensions/exit-command.ts ================================================ import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; export default function (pi: ExtensionAPI) { pi.on("input", async (event, ctx) => { // Only handle plain "exit" without arguments if (event.text.trim() === "exit") { ctx.shutdown(); return { action: "handled" }; } return { action: "continue" }; }); } ================================================ FILE: agents/pi/extensions/handoff.ts ================================================ /** * Handoff extension - transfer context to a new focused session * * Instead of compacting (which is lossy), handoff extracts what matters * for your next task and creates a new session with a generated prompt. * * Usage: * /handoff now implement this for teams as well * /handoff execute phase one of the plan * /handoff check other places that need this fix * * The generated prompt appears as a draft in the editor for review/editing. 
*/ import { complete, type Message } from "@mariozechner/pi-ai"; import type { ExtensionAPI, SessionEntry } from "@mariozechner/pi-coding-agent"; import { BorderedLoader, convertToLlm, serializeConversation } from "@mariozechner/pi-coding-agent"; const SYSTEM_PROMPT = `You are a context transfer assistant. Given a conversation history and the user's goal for a new thread, generate a focused prompt that: 1. Summarizes relevant context from the conversation (decisions made, approaches taken, key findings) 2. Lists any relevant files that were discussed or modified 3. Clearly states the next task based on the user's goal 4. Is self-contained - the new thread should be able to proceed without the old conversation Format your response as a prompt the user can send to start the new thread. Be concise but include all necessary context. Do not include any preamble like "Here's the prompt" - just output the prompt itself. Example output format: ## Context We've been working on X. Key decisions: - Decision 1 - Decision 2 Files involved: - path/to/file1.ts - path/to/file2.ts ## Task [Clear description of what to do next based on user's goal]`; export default function (pi: ExtensionAPI) { pi.registerCommand("handoff", { description: "Transfer context to a new focused session", handler: async (args, ctx) => { if (!ctx.hasUI) { ctx.ui.notify("handoff requires interactive mode", "error"); return; } if (!ctx.model) { ctx.ui.notify("No model selected", "error"); return; } const goal = args.trim(); if (!goal) { ctx.ui.notify("Usage: /handoff ", "error"); return; } // Gather conversation context from current branch const branch = ctx.sessionManager.getBranch(); const messages = branch .filter((entry): entry is SessionEntry & { type: "message" } => entry.type === "message") .map((entry) => entry.message); if (messages.length === 0) { ctx.ui.notify("No conversation to hand off", "error"); return; } // Convert to LLM format and serialize const llmMessages = convertToLlm(messages); const 
conversationText = serializeConversation(llmMessages); const currentSessionFile = ctx.sessionManager.getSessionFile(); // Generate the handoff prompt with loader UI const result = await ctx.ui.custom((tui, theme, _kb, done) => { const loader = new BorderedLoader(tui, theme, `Generating handoff prompt...`); loader.onAbort = () => done(null); const doGenerate = async () => { const apiKey = await ctx.modelRegistry.getApiKey(ctx.model!); const userMessage: Message = { role: "user", content: [ { type: "text", text: `## Conversation History\n\n${conversationText}\n\n## User's Goal for New Thread\n\n${goal}`, }, ], timestamp: Date.now(), }; const response = await complete( ctx.model!, { systemPrompt: SYSTEM_PROMPT, messages: [userMessage] }, { apiKey, signal: loader.signal }, ); if (response.stopReason === "aborted") { return null; } return response.content .filter((c): c is { type: "text"; text: string } => c.type === "text") .map((c) => c.text) .join("\n"); }; doGenerate() .then(done) .catch((err) => { console.error("Handoff generation failed:", err); done(null); }); return loader; }); if (result === null) { ctx.ui.notify("Cancelled", "info"); return; } // Let user edit the generated prompt const editedPrompt = await ctx.ui.editor("Edit handoff prompt", result); if (editedPrompt === undefined) { ctx.ui.notify("Cancelled", "info"); return; } // Create new session with parent tracking const newSessionResult = await ctx.newSession({ parentSession: currentSessionFile, }); if (newSessionResult.cancelled) { ctx.ui.notify("New session cancelled", "info"); return; } // Set the edited prompt in the main editor for submission ctx.ui.setEditorText(editedPrompt); ctx.ui.notify("Handoff ready. Submit when ready.", "info"); }, }); } ================================================ FILE: agents/pi/extensions/lsp/index.ts ================================================ /** * LSP Diagnostics Extension * * Warms up LSP servers on file read, provides diagnostics on write/edit operations. 
* Pattern based on opencode's implementation. */ import type { ExtensionAPI, ToolResultEvent, ExtensionContext, } from "@mariozechner/pi-coding-agent"; import { spawn, type ChildProcess } from "node:child_process"; import { readFileSync } from "node:fs"; import { resolve, extname } from "node:path"; import { pathToFileURL, fileURLToPath } from "node:url"; import { createMessageConnection, StreamMessageReader, StreamMessageWriter, type MessageConnection, } from "vscode-jsonrpc/node"; import type { Diagnostic as VSCodeDiagnostic } from "vscode-languageserver-protocol"; import { LSP_SERVERS, LANGUAGE_MAP, type LspHandle, type LspServerInfo } from "./lsp-config.js"; const DIAGNOSTICS_TIMEOUT_MS = 5000; const DIAGNOSTICS_DEBOUNCE_MS = 150; const LSP_CONNECTION_TIMEOUT_MS = 5000; type Diagnostic = VSCodeDiagnostic; interface LspConnection { connection: MessageConnection; process: ChildProcess; rootDir: string; serverName: string; languages: string[]; startedAt: number; lastActivity: number; diagnostics: Map; openedFiles: Map; initialized: boolean; } interface LspStatus { name: string; pid: number | undefined; rootDir: string; languages: string[]; uptime: number; lastActivity: number; filesTracked: number; initialized: boolean; } class LspConnectionManager { private connections = new Map(); private diagnosticCallbacks = new Map void>>(); async getOrCreateConnections( filePath: string, cwd: string, ctx: ExtensionContext, ): Promise { const ext = extname(filePath); const languageId = LANGUAGE_MAP[ext]; if (!languageId) return []; // Find all matching servers const matchingServers = LSP_SERVERS.filter((server) => server.extensions.includes(ext)); const connections: LspConnection[] = []; for (const server of matchingServers) { const rootDir = await server.root(filePath, cwd); if (!rootDir) continue; // Server doesn't apply to this project const connectionKey = `${server.id}:${rootDir}`; if (this.connections.has(connectionKey)) { const conn = 
this.connections.get(connectionKey)!; conn.lastActivity = Date.now(); connections.push(conn); continue; } try { const handle = await server.spawn(rootDir, cwd); if (!handle) { ctx.ui.notify(`Could not start ${server.id} LSP server`, "error"); continue; } const conn = await this.createConnection( connectionKey, server.id, handle, rootDir, server.extensions, ctx, AbortSignal.timeout(LSP_CONNECTION_TIMEOUT_MS), ); this.connections.set(connectionKey, conn); connections.push(conn); } catch (e: any) { if (e.name === "TimeoutError") { ctx.ui.notify(`${server.id} LSP connection timed out, skipping`, "warning"); } else { ctx.ui.notify(`Failed to start ${server.id} LSP: ${e}`, "error"); } } } return connections; } private async createConnection( key: string, name: string, handle: LspHandle, rootDir: string, languages: string[], ctx: ExtensionContext, signal: AbortSignal, ): Promise { ctx.ui.notify(`Starting ${name} LSP...`, "info"); const connection = createMessageConnection( new StreamMessageReader(handle.process.stdout), new StreamMessageWriter(handle.process.stdin), ); const conn: LspConnection = { connection, process: handle.process, rootDir, serverName: name, languages: [...languages], startedAt: Date.now(), lastActivity: Date.now(), diagnostics: new Map(), openedFiles: new Map(), initialized: false, }; // Handle diagnostics connection.onNotification("textDocument/publishDiagnostics", (params: any) => { const filePath = this.normalizePath(fileURLToPath(params.uri)); conn.diagnostics.set(filePath, params.diagnostics); // Notify waiters const callbacks = this.diagnosticCallbacks.get(filePath); if (callbacks) { for (const cb of callbacks) cb(); } }); // Handle window/workDoneProgress/create connection.onRequest("window/workDoneProgress/create", () => null); // Handle workspace/configuration connection.onRequest("workspace/configuration", async () => [handle.initialization ?? 
{}]); // Handle client/registerCapability connection.onRequest("client/registerCapability", async () => {}); // Handle client/unregisterCapability connection.onRequest("client/unregisterCapability", async () => {}); // Handle workspace/workspaceFolders connection.onRequest("workspace/workspaceFolders", async () => [ { name: "workspace", uri: pathToFileURL(rootDir).href, }, ]); connection.listen(); // Initialize with timeout using AbortSignal try { await this.initializeConnection(connection, handle, rootDir, signal); } catch (e: any) { if (e.name === "TimeoutError") { ctx.ui.notify(`${name} LSP connection timed out, continuing anyway`, "warning"); } else { throw e; } } conn.initialized = true; // Handle stderr handle.process.stderr?.on("data", (data: Buffer) => { const msg = data.toString().trim(); if (msg) console.error(`[LSP:${name}]`, msg.slice(0, 200)); }); // Handle process exit handle.process.on("exit", (code) => { if (code && code !== 0) { ctx.ui.notify(`${name} LSP exited with code ${code}`, "error"); } this.connections.delete(key); }); ctx.ui.notify(`${name} LSP ready`, "success"); return conn; } private async initializeConnection( connection: MessageConnection, handle: LspHandle, rootDir: string, signal: AbortSignal, ): Promise { await connection.sendRequest("initialize", { rootUri: pathToFileURL(rootDir).href, processId: handle.process.pid, workspaceFolders: [ { name: "workspace", uri: pathToFileURL(rootDir).href, }, ], initializationOptions: handle.initialization ?? 
{}, capabilities: { window: { workDoneProgress: true, }, workspace: { configuration: true, didChangeWatchedFiles: { dynamicRegistration: true, }, }, textDocument: { synchronization: { didOpen: true, didChange: true, }, publishDiagnostics: { versionSupport: true, }, }, }, }); await connection.sendNotification("initialized", {}); if (handle.initialization) { await connection.sendNotification("workspace/didChangeConfiguration", { settings: handle.initialization, }); } } /** * Touch file with diagnostic subscription BEFORE sending didOpen * This is the key fix from opencode to avoid race conditions */ async touchFile( conn: LspConnection, filePath: string, content: string, languageId: string, waitForDiagnostics: boolean = false, ): Promise { const normalizedPath = this.normalizePath(filePath); const uri = pathToFileURL(normalizedPath).href; const existingVersion = conn.openedFiles.get(normalizedPath); // Set up diagnostic listener BEFORE sending notification (opencode pattern) let diagnosticsPromise: Promise | undefined; let debounceTimer: ReturnType | undefined; let unsubscribe: (() => void) | undefined; if (waitForDiagnostics) { diagnosticsPromise = new Promise((resolve) => { let resolved = false; const tryResolve = () => { if (resolved) return; if (debounceTimer) clearTimeout(debounceTimer); debounceTimer = setTimeout(() => { resolved = true; unsubscribe?.(); resolve(conn.diagnostics.get(normalizedPath) ?? []); }, DIAGNOSTICS_DEBOUNCE_MS); }; if (!this.diagnosticCallbacks.has(normalizedPath)) { this.diagnosticCallbacks.set(normalizedPath, new Set()); } const callbacks = this.diagnosticCallbacks.get(normalizedPath)!; callbacks.add(tryResolve); unsubscribe = () => callbacks.delete(tryResolve); // Timeout fallback setTimeout(() => { if (!resolved) { resolved = true; unsubscribe?.(); resolve(conn.diagnostics.get(normalizedPath) ?? 
[]); } }, DIAGNOSTICS_TIMEOUT_MS); }); } // Now send the notification if (existingVersion !== undefined) { conn.diagnostics.delete(normalizedPath); const nextVersion = existingVersion + 1; conn.openedFiles.set(normalizedPath, nextVersion); await conn.connection.sendNotification("workspace/didChangeWatchedFiles", { changes: [{ uri, type: 2 }], // Changed }); await conn.connection.sendNotification("textDocument/didChange", { textDocument: { uri, version: nextVersion }, contentChanges: [{ text: content }], }); } else { conn.openedFiles.set(normalizedPath, 0); conn.diagnostics.delete(normalizedPath); await conn.connection.sendNotification("workspace/didChangeWatchedFiles", { changes: [{ uri, type: 1 }], // Created }); await conn.connection.sendNotification("textDocument/didOpen", { textDocument: { uri, languageId, version: 0, text: content }, }); } if (diagnosticsPromise) { return diagnosticsPromise; } return []; } getDiagnostics(conn: LspConnection, filePath: string): Diagnostic[] { const normalizedPath = this.normalizePath(filePath); return conn.diagnostics.get(normalizedPath) ?? 
[]; } getAllDiagnostics(filePath: string): { conn: LspConnection; diagnostics: Diagnostic[] }[] { const normalizedPath = this.normalizePath(filePath); const results: { conn: LspConnection; diagnostics: Diagnostic[] }[] = []; for (const conn of this.connections.values()) { const diags = conn.diagnostics.get(normalizedPath); if (diags && diags.length > 0) { results.push({ conn, diagnostics: diags }); } } return results; } async shutdown(conn: LspConnection): Promise { try { await conn.connection.sendRequest("shutdown", undefined); await conn.connection.sendNotification("exit", {}); conn.connection.end(); conn.connection.dispose(); } catch { // Ignore shutdown errors } conn.process.kill(); } async shutdownAll(): Promise { const promises = Array.from(this.connections.values()).map((conn) => this.shutdown(conn)); await Promise.all(promises); this.connections.clear(); } killConnection(key: string): boolean { const conn = this.connections.get(key); if (!conn) return false; conn.process.kill("SIGTERM"); setTimeout(() => { if (!conn.process.killed) { conn.process.kill("SIGKILL"); } }, 1000); this.connections.delete(key); return true; } getStatus(): LspStatus[] { return Array.from(this.connections.entries()).map(([key, conn]) => ({ name: conn.serverName, pid: conn.process.pid, rootDir: conn.rootDir, languages: conn.languages, uptime: Date.now() - conn.startedAt, lastActivity: Date.now() - conn.lastActivity, filesTracked: conn.openedFiles.size, initialized: conn.initialized, })); } private normalizePath(filePath: string): string { return resolve(filePath); } } // Helper Functions function extractFileFromEvent(event: ToolResultEvent): { path: string; isWrite: boolean } | null { if (!event.isError && (event.toolName === "write" || event.toolName === "edit")) { return { path: event.input.path as string, isWrite: true }; } if (!event.isError && event.toolName === "read") { return { path: event.input.path as string, isWrite: false }; } return null; } function 
formatDiagnostics(diagnostics: Diagnostic[]): string { const severityLabels = ["", "ERROR", "WARNING", "INFO", "HINT"]; return diagnostics .map((d) => { const line = (d.range?.start?.line ?? 0) + 1; const col = (d.range?.start?.character ?? 0) + 1; const severity = severityLabels[d.severity ?? 0] || "UNKNOWN"; const code = d.code ? `[${d.code}]` : ""; // const source = d.source ? `${d.source}` : ""; return `${severity} [${line}:${col}] ${d.message}`; }) .join("\n"); } function formatDuration(ms: number): string { if (ms < 1000) return `${ms}ms`; if (ms < 60000) return `${(ms / 1000).toFixed(1)}s`; return `${(ms / 60000).toFixed(1)}m`; } export default function (pi: ExtensionAPI) { const manager = new LspConnectionManager(); // Warm up LSP on read, full diagnostics on write/edit pi.on("tool_result", async (event: ToolResultEvent, ctx: ExtensionContext) => { const fileInfo = extractFileFromEvent(event); if (!fileInfo) return; const { path: filePath, isWrite } = fileInfo; const absPath = resolve(ctx.cwd, filePath); const conns = await manager.getOrCreateConnections(filePath, ctx.cwd, ctx); if (conns.length === 0) return; let content: string; try { content = readFileSync(absPath, "utf-8"); } catch { return; } const ext = extname(filePath); const languageId = LANGUAGE_MAP[ext]; if (!languageId) return; // For read operations: warm up and fetch existing diagnostics if available if (!isWrite) { await Promise.all( conns.map((conn) => manager.touchFile(conn, absPath, content, languageId, false)), ); const allDiags = manager.getAllDiagnostics(absPath); // if (allDiags.length > 0) { // const combined = allDiags.flatMap((d) => d.diagnostics); // const formatted = formatDiagnostics(combined); // pi.sendUserMessage(`Current diagnostics for \`${filePath}\`:\n${formatted}`, { // deliverAs: "steer", // }); // } return; } // For write operations: use opencode pattern - subscribe BEFORE sending const diagnostics = ( await Promise.all( conns.map((conn) => manager.touchFile(conn, 
absPath, content, languageId, true)), ) ).flat(); // Show results if (diagnostics.length > 0) { const formatted = formatDiagnostics(diagnostics); pi.sendUserMessage(`Diagnostics for \`${filePath}\`:\n${formatted}`, { deliverAs: "steer", }); } else { ctx.ui.notify(`✓ ${filePath}: clean`, "success"); } }); // Cleanup on shutdown pi.on("session_shutdown", async () => { await manager.shutdownAll(); }); // /lsp command - dashboard pi.registerCommand("lsp", { description: "LSP status dashboard - show running servers, available LSPs, and manage processes", getArgumentCompletions: (prefix: string) => { const actions = ["status", "available", "kill", "killall"]; const filtered = actions.filter((a) => a.startsWith(prefix)); return filtered.length > 0 ? filtered.map((a) => ({ value: a, label: a })) : null; }, handler: async (args: string, ctx) => { const parts = args.trim().split(/\s+/); const action = parts[0] || "status"; const serverKey = parts[1]; switch (action) { case "status": { const status = manager.getStatus(); if (status.length === 0) { ctx.ui.notify("No LSP servers currently running", "info"); return; } const lines = [ "📊 LSP Server Status", "", ...status.map((s) => { const uptime = formatDuration(s.uptime); const statusStr = s.initialized ? 
"🟢 ready" : "🟡 initializing"; return ` ${s.name}\n PID: ${s.pid} | Status: ${statusStr}\n Root: ${s.rootDir}\n Languages: ${s.languages.join(", ")}\n Uptime: ${uptime} | Files: ${s.filesTracked}`; }), "", "Use `/lsp kill ` to kill a specific server", "Use `/lsp killall` to kill all servers", ]; ctx.ui.notify(lines.join("\n"), "info"); break; } case "available": { const lines = [ "🔧 Available LSP Servers", "", "Configured servers:", ...LSP_SERVERS.map((s) => ` ${s.id} - ${s.extensions.join(", ")}`), ]; ctx.ui.notify(lines.join("\n"), "info"); break; } case "kill": { if (!serverKey) { const status = manager.getStatus(); if (status.length === 0) { ctx.ui.notify("No servers running", "info"); return; } const keys = status.map((s) => `${s.name}:${s.rootDir}`); ctx.ui.notify("Usage: /lsp kill \n\nAvailable servers:", "error"); ctx.ui.notify(keys.join("\n"), "info"); return; } const killed = manager.killConnection(serverKey); if (killed) { ctx.ui.notify(`Killed LSP server: ${serverKey}`, "success"); } else { ctx.ui.notify(`Server not found: ${serverKey}`, "error"); } break; } case "killall": { const status = manager.getStatus(); await manager.shutdownAll(); ctx.ui.notify( `Killed ${status.length} LSP server${status.length > 1 ? 
"s" : ""}`, "success", ); break; } } }, }); } ================================================ FILE: agents/pi/extensions/lsp/lsp-config.ts ================================================ /** * LSP Server Configurations * * Each server defines: * - id: Unique identifier * - extensions: File extensions this server handles * - root: Function to find project root directory * - spawn: Function to start the LSP server process */ import { spawn, type ChildProcessWithoutNullStreams } from "node:child_process"; import { access, constants } from "node:fs/promises"; import { resolve, dirname } from "node:path"; import { homedir } from "node:os"; // Mason bin path (Neovim plugin manager) const MASON_BIN_PATH = resolve(homedir(), ".local/share/nvim/mason/bin"); export interface LspHandle { process: ChildProcessWithoutNullStreams; initialization?: Record; } export type RootFunction = (file: string, cwd: string) => Promise; export interface LspServerInfo { id: string; extensions: string[]; root: RootFunction; spawn(root: string, cwd: string): Promise; } // Language ID mapping for LSP export const LANGUAGE_MAP: Record = { ".ts": "typescript", ".tsx": "typescriptreact", ".js": "javascript", ".jsx": "javascriptreact", ".mjs": "javascript", ".cjs": "javascript", ".mts": "typescript", ".cts": "typescript", ".lua": "lua", ".py": "python", ".pyi": "python", ".rs": "rust", ".php": "php", ".svelte": "svelte", ".astro": "astro", ".vue": "vue", ".css": "css", ".scss": "scss", ".less": "less", ".html": "html", ".json": "json", ".jsonc": "json", ".yaml": "yaml", ".yml": "yaml", ".md": "markdown", ".typ": "typst", ".typc": "typst", ".go": "go", ".zig": "zig", ".zon": "zig", ".c": "c", ".cpp": "cpp", ".cc": "cpp", ".cxx": "cpp", ".c++": "cpp", ".h": "c", ".hpp": "cpp", ".hh": "cpp", ".hxx": "cpp", ".h++": "cpp", ".ex": "elixir", ".exs": "elixir", ".cs": "csharp", ".fs": "fsharp", ".fsi": "fsharp", ".fsx": "fsharp", ".fsscript": "fsharp", ".swift": "swift", ".kt": "kotlin", ".kts": "kotlin", 
".java": "java", ".rb": "ruby", ".rake": "ruby", ".dart": "dart", ".ml": "ocaml", ".mli": "ocaml", ".sh": "shellscript", ".bash": "shellscript", ".zsh": "shellscript", ".ksh": "shellscript", ".nix": "nix", ".gleam": "gleam", ".clj": "clojure", ".cljs": "clojure", ".cljc": "clojure", ".edn": "clojure", ".hs": "haskell", ".lhs": "haskell", ".jl": "julia", ".tf": "terraform", ".tfvars": "terraform", ".tex": "latex", ".bib": "latex", ".prisma": "prisma", }; /** * Check if a path exists (async) */ async function pathExists(path: string): Promise { try { await access(path, constants.F_OK); return true; } catch { return false; } } /** * Find binary in PATH, checking Mason bin first */ export async function which(bin: string): Promise { // 1. Check Mason bin first (Neovim LSP servers) const masonPath = resolve(MASON_BIN_PATH, bin); if (await pathExists(masonPath)) return masonPath; // 2. Check PATH const pathEnv = process.env.PATH || ""; for (const dir of pathEnv.split(":")) { if (!dir) continue; const fullPath = resolve(dir, bin); if (await pathExists(fullPath)) return fullPath; } return undefined; } /** * Helper to find nearest file upward in directory tree */ export function nearestRoot(includePatterns: string[], excludePatterns?: string[]): RootFunction { return async (file: string, cwd: string) => { let current = dirname(file); const stopAt = cwd; // Check for exclusions first if (excludePatterns) { while (current !== "/" && current !== dirname(current)) { for (const pattern of excludePatterns) { if (await pathExists(resolve(current, pattern))) { return undefined; } } if (current === stopAt) break; current = dirname(current); } } // Reset and check for inclusions current = dirname(file); while (current !== "/" && current !== dirname(current)) { for (const pattern of includePatterns) { if (await pathExists(resolve(current, pattern))) { return current; } } if (current === stopAt) break; current = dirname(current); } return undefined; }; } /** * Create a simple spawn 
handle */ function createHandle( bin: string, args: string[], root: string, initialization?: Record, ): LspHandle | undefined { const proc = spawn(bin, args, { cwd: root }) as ChildProcessWithoutNullStreams; return { process: proc, initialization }; } // ============================================================================ // LSP Server Definitions // ============================================================================ export const LSP_SERVERS: LspServerInfo[] = [ // TypeScript/JavaScript { id: "typescript", extensions: [".ts", ".tsx", ".js", ".jsx", ".mjs", ".cjs", ".mts", ".cts"], root: nearestRoot( [ "package-lock.json", "bun.lockb", "bun.lock", "pnpm-lock.yaml", "yarn.lock", "package.json", "tsconfig.json", ], ["deno.json", "deno.jsonc"], ), async spawn(root, cwd) { // Try to find local tsserver first let tsserver: string | undefined; const possiblePaths = [ resolve(cwd, "node_modules", "typescript", "lib", "tsserver.js"), resolve(root, "node_modules", "typescript", "lib", "tsserver.js"), ]; for (const p of possiblePaths) { if (await pathExists(p)) { tsserver = p; break; } } const bin = await which("typescript-language-server"); if (!bin) return undefined; return createHandle( bin, ["--stdio"], root, tsserver ? 
{ tsserver: { path: tsserver } } : undefined, ); }, }, // Deno { id: "deno", extensions: [".ts", ".tsx", ".js", ".jsx", ".mjs"], root: nearestRoot( ["deno.json", "deno.jsonc"], [ "package-lock.json", "bun.lockb", "bun.lock", "pnpm-lock.yaml", "yarn.lock", "package.json", "tsconfig.json", ], ), async spawn(root) { const bin = await which("deno"); if (!bin) return undefined; return createHandle(bin, ["lsp"], root); }, }, // Oxlint { id: "oxlint", extensions: [ ".ts", ".tsx", ".js", ".jsx", ".mjs", ".cjs", ".mts", ".cts", ".json", ".jsonc", ".vue", ".astro", ".svelte", ".css", ], root: nearestRoot([ ".oxlintrc.json", "package-lock.json", "bun.lockb", "bun.lock", "pnpm-lock.yaml", "yarn.lock", "package.json", ]), async spawn(root) { // Try local oxlint first let bin = resolve(root, "node_modules", ".bin", "oxlint"); if (!(await pathExists(bin))) { bin = (await which("oxlint")) || ""; } if (!bin || !(await pathExists(bin))) { // Try bun x oxlint const bunBin = (await which("bun")) || "bun"; const proc = spawn(bunBin, ["x", "oxlint", "--lsp"], { cwd: root, }) as ChildProcessWithoutNullStreams; return { process: proc }; } return createHandle(bin, ["--lsp"], root); }, }, // Rust { id: "rust", extensions: [".rs"], root: nearestRoot(["Cargo.toml", "Cargo.lock"]), async spawn(root) { const bin = await which("rust-analyzer"); if (!bin) return undefined; return createHandle(bin, [], root); }, }, // Go { id: "gopls", extensions: [".go"], root: nearestRoot(["go.work", "go.mod", "go.sum"]), async spawn(root) { const bin = await which("gopls"); if (!bin) return undefined; return createHandle(bin, [], root); }, }, // Python (Pyright) { id: "pyright", extensions: [".py", ".pyi"], root: nearestRoot([ "pyproject.toml", "setup.py", "setup.cfg", "requirements.txt", "Pipfile", "pyrightconfig.json", ]), async spawn(root) { let bin = await which("pyright-langserver"); if (!bin) { // Try bun x const bunBin = (await which("bun")) || "bun"; const proc = spawn(bunBin, ["x", "pyright", 
"--stdio"], { cwd: root, }) as ChildProcessWithoutNullStreams; return { process: proc }; } const initialization: Record = {}; // Check for virtual env const venvPaths = [ process.env.VIRTUAL_ENV, resolve(root, ".venv"), resolve(root, "venv"), ].filter(Boolean) as string[]; for (const venvPath of venvPaths) { const pythonPath = process.platform === "win32" ? resolve(venvPath, "Scripts", "python.exe") : resolve(venvPath, "bin", "python"); if (await pathExists(pythonPath)) { initialization.pythonPath = pythonPath; break; } } return createHandle(bin, ["--stdio"], root, initialization); }, }, // Lua { id: "lua", extensions: [".lua"], root: nearestRoot([ ".luarc.json", ".luarc.jsonc", ".luacheckrc", ".stylua.toml", "stylua.toml", ".git", ]), async spawn(root) { const bin = await which("lua-language-server"); if (!bin) return undefined; return createHandle(bin, [], root, { Lua: { diagnostics: { globals: ["vim"] } } }); }, }, // PHP { id: "intelephense", extensions: [".php"], root: nearestRoot(["composer.json", "composer.lock", ".php-version"]), async spawn(root) { const bin = await which("intelephense"); if (!bin) return undefined; return createHandle(bin, ["--stdio"], root, { telemetry: { enabled: false } }); }, }, // Svelte { id: "svelte", extensions: [".svelte"], root: nearestRoot([ "package-lock.json", "bun.lockb", "bun.lock", "pnpm-lock.yaml", "yarn.lock", "package.json", ]), async spawn(root) { const bin = await which("svelteserver"); if (!bin) return undefined; return createHandle(bin, ["--stdio"], root); }, }, // Vue { id: "vue", extensions: [".vue"], root: nearestRoot([ "package-lock.json", "bun.lockb", "bun.lock", "pnpm-lock.yaml", "yarn.lock", "package.json", ]), async spawn(root) { const bin = await which("vue-language-server"); if (!bin) return undefined; return createHandle(bin, ["--stdio"], root); }, }, // Astro { id: "astro", extensions: [".astro"], root: nearestRoot([ "package-lock.json", "bun.lockb", "bun.lock", "pnpm-lock.yaml", "yarn.lock", 
"package.json", ]), async spawn(root, cwd) { const bin = await which("astro-ls"); if (!bin) return undefined; // Find typescript for astro let tsdk: string | undefined; const possiblePaths = [ resolve(cwd, "node_modules", "typescript", "lib"), resolve(root, "node_modules", "typescript", "lib"), ]; for (const p of possiblePaths) { if (await pathExists(p)) { tsdk = p; break; } } return createHandle(bin, ["--stdio"], root, tsdk ? { typescript: { tsdk } } : undefined); }, }, // Zig { id: "zls", extensions: [".zig", ".zon"], root: nearestRoot(["build.zig", "build.zig.zon"]), async spawn(root) { const bin = await which("zls"); if (!bin) return undefined; return createHandle(bin, [], root); }, }, // C/C++ { id: "clangd", extensions: [".c", ".cpp", ".cc", ".cxx", ".c++", ".h", ".hpp", ".hh", ".hxx", ".h++"], root: nearestRoot([ "compile_commands.json", "compile_flags.txt", ".clangd", "CMakeLists.txt", "Makefile", ]), async spawn(root) { const bin = await which("clangd"); if (!bin) return undefined; return createHandle(bin, ["--background-index", "--clang-tidy"], root); }, }, // C# { id: "csharp", extensions: [".cs"], root: nearestRoot([".slnx", ".sln", ".csproj", "global.json"]), async spawn(root) { const bin = (await which("csharp-ls")) || (await which("omnisharp")); if (!bin) return undefined; return createHandle(bin, ["-lsp"], root); }, }, // Swift { id: "sourcekit-lsp", extensions: [".swift"], root: nearestRoot(["Package.swift", "*.xcodeproj", "*.xcworkspace"]), async spawn(root) { const bin = await which("sourcekit-lsp"); if (!bin) return undefined; return createHandle(bin, [], root); }, }, // Elixir { id: "elixir-ls", extensions: [".ex", ".exs"], root: nearestRoot(["mix.exs", "mix.lock"]), async spawn(root) { const bin = (await which("elixir-ls")) || (await which("language_server.sh")); if (!bin) return undefined; return createHandle(bin, [], root); }, }, // Kotlin { id: "kotlin-ls", extensions: [".kt", ".kts"], root: nearestRoot([ "settings.gradle.kts", 
"settings.gradle", "build.gradle.kts", "build.gradle", "pom.xml", ]), async spawn(root) { const bin = await which("kotlin-lsp"); if (!bin) return undefined; return createHandle(bin, ["--stdio"], root); }, }, // Dart { id: "dart", extensions: [".dart"], root: nearestRoot(["pubspec.yaml", "analysis_options.yaml"]), async spawn(root) { const bin = await which("dart"); if (!bin) return undefined; return createHandle(bin, ["language-server", "--lsp"], root); }, }, // OCaml { id: "ocaml-lsp", extensions: [".ml", ".mli"], root: nearestRoot(["dune-project", "dune-workspace", ".merlin", "opam"]), async spawn(root) { const bin = await which("ocamllsp"); if (!bin) return undefined; return createHandle(bin, [], root); }, }, // Bash { id: "bash", extensions: [".sh", ".bash", ".zsh", ".ksh"], root: () => undefined, // Will use cwd async spawn(root) { const bin = await which("bash-language-server"); if (!bin) return undefined; return createHandle(bin, ["start"], root); }, }, // Nix { id: "nixd", extensions: [".nix"], root: nearestRoot(["flake.nix", "flake.lock", ".git"]), async spawn(root) { const bin = await which("nixd"); if (!bin) return undefined; return createHandle(bin, [], root); }, }, // YAML { id: "yaml", extensions: [".yaml", ".yml"], root: nearestRoot(["package.json"]), async spawn(root) { const bin = await which("yaml-language-server"); if (!bin) return undefined; return createHandle(bin, ["--stdio"], root); }, }, // JSON { id: "json", extensions: [".json", ".jsonc"], root: nearestRoot(["package.json"]), async spawn(root) { const bin = await which("vscode-json-language-server"); if (!bin) return undefined; return createHandle(bin, ["--stdio"], root); }, }, // HTML { id: "html", extensions: [".html"], root: nearestRoot(["package.json"]), async spawn(root) { const bin = await which("vscode-html-language-server"); if (!bin) return undefined; return createHandle(bin, ["--stdio"], root); }, }, // CSS { id: "css", extensions: [".css", ".scss", ".less"], root: 
nearestRoot(["package.json"]), async spawn(root) { const bin = await which("vscode-css-language-server"); if (!bin) return undefined; return createHandle(bin, ["--stdio"], root); }, }, // Typst { id: "tinymist", extensions: [".typ", ".typc"], root: nearestRoot(["typst.toml"]), async spawn(root) { const bin = await which("tinymist"); if (!bin) return undefined; return createHandle(bin, ["lsp"], root); }, }, // Terraform { id: "terraform", extensions: [".tf", ".tfvars"], root: nearestRoot([".terraform.lock.hcl", "terraform.tfstate", "*.tf"]), async spawn(root) { const bin = await which("terraform-ls"); if (!bin) return undefined; return createHandle(bin, ["serve"], root); }, }, // Gleam { id: "gleam", extensions: [".gleam"], root: nearestRoot(["gleam.toml", "manifest.toml"]), async spawn(root) { const bin = await which("gleam"); if (!bin) return undefined; return createHandle(bin, ["lsp"], root); }, }, // Clojure { id: "clojure-lsp", extensions: [".clj", ".cljs", ".cljc", ".edn"], root: nearestRoot(["deps.edn", "project.clj", "shadow-cljs.edn", "bb.edn", "build.boot"]), async spawn(root) { const bin = await which("clojure-lsp"); if (!bin) return undefined; return createHandle(bin, ["listen"], root); }, }, // Haskell { id: "haskell-language-server", extensions: [".hs", ".lhs"], root: nearestRoot(["stack.yaml", "cabal.project", "hie.yaml", "*.cabal"]), async spawn(root) { const bin = await which("haskell-language-server-wrapper"); if (!bin) return undefined; return createHandle(bin, ["--lsp"], root); }, }, // Julia { id: "julials", extensions: [".jl"], root: nearestRoot(["Project.toml", "Manifest.toml"]), async spawn(root) { const bin = await which("julia"); if (!bin) return undefined; const proc = spawn( bin, ["--startup-file=no", "--history-file=no", "-e", "using LanguageServer; runserver()"], { cwd: root }, ) as ChildProcessWithoutNullStreams; return { process: proc }; }, }, // Prisma { id: "prisma", extensions: [".prisma"], root: nearestRoot(["schema.prisma", 
"prisma/schema.prisma"]), async spawn(root) { const bin = await which("prisma"); if (!bin) return undefined; return createHandle(bin, ["language-server"], root); }, }, ]; ================================================ FILE: agents/pi/extensions/notify.ts ================================================ /** * Pi Notify Extension * * Sends a native terminal notification when Pi agent is done and waiting for input. * Supports multiple terminal protocols: * - OSC 777: Ghostty, iTerm2, WezTerm, rxvt-unicode * - OSC 99: Kitty * - Windows toast: Windows Terminal (WSL) */ import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; function windowsToastScript(title: string, body: string): string { const type = "Windows.UI.Notifications"; const mgr = `[${type}.ToastNotificationManager, ${type}, ContentType = WindowsRuntime]`; const template = `[${type}.ToastTemplateType]::ToastText01`; const toast = `[${type}.ToastNotification]::new($xml)`; return [ `${mgr} > $null`, `$xml = [${type}.ToastNotificationManager]::GetTemplateContent(${template})`, `$xml.GetElementsByTagName('text')[0].AppendChild($xml.CreateTextNode('${body}')) > $null`, `[${type}.ToastNotificationManager]::CreateToastNotifier('${title}').Show(${toast})`, ].join("; "); } function notifyOSC777(title: string, body: string): void { process.stdout.write(`\x1b]777;notify;${title};${body}\x07`); } function notifyOSC99(title: string, body: string): void { // Kitty OSC 99: i=notification id, d=0 means not done yet, p=body for second part process.stdout.write(`\x1b]99;i=1:d=0;${title}\x1b\\`); process.stdout.write(`\x1b]99;i=1:p=body;${body}\x1b\\`); } function notifyWindows(title: string, body: string): void { const { execFile } = require("child_process"); execFile("powershell.exe", ["-NoProfile", "-Command", windowsToastScript(title, body)]); } function notify(title: string, body: string): void { if (process.env.WT_SESSION) { notifyWindows(title, body); } else if (process.env.KITTY_WINDOW_ID) { 
notifyOSC99(title, body); } else { notifyOSC777(title, body); } } export default function (pi: ExtensionAPI) { pi.on("agent_end", async () => { notify("Pi", "Ready for input!"); }); } ================================================ FILE: agents/pi/extensions/pi-codemode/README.md ================================================ # pi-codemode Executor plugins for codemode plus curated npm/jj commands and whitelisted bash. ## Tools - `pi.*` builtins (filesystem, bash, webfetch) - `fff.*` search tools - `npm.run` run npm scripts - `npm.install` install packages via lockfile-aware package manager selection - `jj.status` / `jj.diff` / `jj.log` / `jj.new` / `jj.describe` / `jj.commit` for jj ## Bash whitelist - Package managers: `npm`, `pnpm`, `bun`, `yarn`, `npx`, `node` - File operations: `mkdir`, `touch`, `cp`, `mv`, `rm`, `ln`, `chmod`, `chown` - Utilities: `pwd`, `echo`, `which`, `uname`, `date`, `sleep` - Everything else is blocked with guidance to use the equivalent `tools.pi.*` tool ## Rules - no full bash access — whitelisted package managers only - use JS sandbox + named tools instead - repo.install picks pnpm if pnpm-lock.yaml exists, bun if bun.lock/bun.lockb exists, else npm ================================================ FILE: agents/pi/extensions/pi-codemode/executor.jsonc ================================================ { "sources": [ { "kind": "mcp", "transport": "remote", "name": "mcp-typescript server on vercel", "endpoint": "https://mcp.exa.ai/mcp", "remoteTransport": "auto", "namespace": "mcp_typescript_server_on_vercel" } ] } ================================================ FILE: agents/pi/extensions/pi-codemode/index.ts ================================================ import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; import { createCodemodeTool } from "./src/codemode.js"; import { disposeAllExecutors, getExecutor } from "./src/executor-cache.js"; import { acquireSandbox, disposeAll as disposeAllSandboxes } from 
"./src/sandbox-cache.js"; import { initFinder } from "./src/fff.js"; export default function registerCodemode(pi: ExtensionAPI) { pi.registerTool(createCodemodeTool()); const activate = () => pi.setActiveTools(["codemode"]); // Pre-warm sandbox, finder, and executor on session start pi.on("session_start", async (event, ctx) => { if (event.reason === "startup" || event.reason === "reload" || event.reason === "new") { await acquireSandbox(ctx.cwd); // creates & caches sandbox initFinder(ctx.cwd).catch(() => {}); // fire-and-forget fff scan getExecutor(ctx.cwd).catch(() => {}); // fire-and-forget executor creation } activate(); }); pi.on("before_agent_start", async () => { activate(); return undefined; }); pi.on("session_shutdown", async () => { await Promise.allSettled([ disposeAllExecutors(), disposeAllSandboxes(), ]); }); } ================================================ FILE: agents/pi/extensions/pi-codemode/package.json ================================================ { "name": "pi-codemode", "version": "0.1.0", "private": true, "type": "module", "description": "pi codemode extension with executor SDK and secure-exec sandbox", "keywords": [ "pi-package" ], "peerDependencies": { "@mariozechner/pi-ai": "*", "@mariozechner/pi-agent-core": "*", "@mariozechner/pi-coding-agent": "*", "@mariozechner/pi-tui": "*", "@sinclair/typebox": "*" }, "dependencies": { "@executor-js/plugin-graphql": "^0.0.1-beta.2", "@executor-js/plugin-mcp": "^0.0.1-beta.2", "@executor-js/plugin-openapi": "^0.0.1-beta.2", "@executor-js/sdk": "^0.0.1-beta.2", "@ff-labs/fff-node": "^0.5.2", "jsonc-parser": "^3.3.1", "secure-exec": "^0.2.1", "turndown": "^7.2.4" }, "devDependencies": {}, "pi": { "extensions": [ "./index.ts" ] }, "packageManager": "bun@1.3.12" } ================================================ FILE: agents/pi/extensions/pi-codemode/src/builtins.ts ================================================ import { createBashTool, createEditTool, createFindTool, createGrepTool, createLsTool, 
createReadTool,
	createWriteTool,
} from "@mariozechner/pi-coding-agent";
import { createCustomReadTool } from "./read.js";
import { webfetch } from "./webfetch.js";
import type { BuiltinToolName } from "./types.js";
import { Type } from "@sinclair/typebox";

/** Minimal structural shape shared by every builtin tool in this module. */
export type BuiltinTool = {
	name: string;
	description: string;
	parameters: unknown;
	execute: (...args: any[]) => Promise<any>;
};

// Only package managers — everything else has a codemode tool equivalent
const BASH_COMMAND_WHITELIST = new Set([
	// Package managers + node (the tool description and README both allow node)
	"vp",
	"vpx",
	"npm",
	"pnpm",
	"bun",
	"yarn",
	"npx",
	"node",
	// File operations (no codemode tool equivalent)
	"mkdir",
	"touch",
	"cp",
	"mv",
	// Utilities
	"pwd",
	"which",
	"type",
	"uname",
]);

/** Guidance appended to every rejection so the model can self-correct. */
const WHITELIST_HELP = () =>
	[
		`Allowed commands: ${[...BASH_COMMAND_WHITELIST].sort().join(", ")}`,
		"Use tools.pi.* for reading, writing, searching, grepping, editing files.",
	].join("\n");

/**
 * Extract the leading command name from a shell command line.
 *
 * Returns null when no command can be parsed, or when the first token is a
 * path (e.g. ./node_modules/.bin/npm) — paths are never whitelisted.
 *
 * NOTE(review): only the FIRST command of a chained line is validated; the
 * full original string is still passed to bash, so `npm x && other` runs
 * `other` unchecked. Confirm this permissiveness is intended.
 */
function extractCommandName(command: string): string | null {
	// Strip shell redirects, pipes, chaining, then grab first word.
	// (Fixed: was /\\s*[|&;<>]/ which matched a literal backslash, so
	// separators were never stripped.)
	const cleaned = command
		.replace(/\s*[|&;<>]/g, " ")
		.replace(/\(/g, " ")
		.trim();
	const first = cleaned.split(/\s+/)[0];
	if (!first) return null;
	// Strip leading path (e.g., ./node_modules/.bin/npm) — not whitelisted
	if (first.includes("/")) return null;
	return first;
}

/**
 * Wrap the stock bash tool so only whitelisted commands run; everything
 * else returns an error message pointing at the tools.pi.* equivalents.
 */
function createWhitelistedBashTool(cwd: string): BuiltinTool {
	const bashTool = createBashTool(cwd) as BuiltinTool;
	return {
		name: bashTool.name,
		description:
			"Run package manager commands (npm, pnpm, bun, yarn, npx, node). Other commands are blocked — use tools.pi.* instead.",
		parameters: bashTool.parameters,
		async execute(toolCallId: string, args: any, signal: AbortSignal | undefined, onUpdate: any) {
			const command = String(args?.command ?? "").trim();
			if (!command) {
				return {
					content: [{ type: "text", text: "No command provided.\n\n" + WHITELIST_HELP() }],
				};
			}
			const cmdName = extractCommandName(command);
			if (!cmdName || !BASH_COMMAND_WHITELIST.has(cmdName)) {
				const blocked = cmdName ?
`"${cmdName}" is not whitelisted` : "Could not parse command"; return { content: [ { type: "text", text: [`Error: ${blocked}`, "", WHITELIST_HELP()].join("\n"), }, ], }; } // Proxy: forward execute via original tool's execute const result = await bashTool.execute(toolCallId, args, signal, onUpdate); return result; }, }; } const webfetchTool: BuiltinTool = { name: "webfetch", description: "Fetch web content and convert to markdown, text, or html. Use when URLs are mentioned to retrieve content. HTTP URLs upgraded to HTTPS. Images and binary content return empty text.", parameters: Type.Object({ url: Type.String({ description: "The URL to fetch content from" }), format: Type.Optional( Type.Union([Type.Literal("markdown"), Type.Literal("text"), Type.Literal("html")], { description: "Output format: markdown (default), text, or html", }), ), timeout: Type.Optional( Type.Number({ description: "Timeout in seconds (max 120, default 30)" }), ), }), execute: webfetch, }; export type BuiltinToolset = Record; const PI_SIGNATURES = [ "tools.pi.read({ path, offset?, limit? })", "tools.pi.bash({ command, timeout? })", "tools.pi.edit({ path, edits: [{ oldText, newText }] })", "tools.pi.write({ path, content })", "tools.pi.grep({ pattern, path?, glob?, ignoreCase?, literal?, context?, limit? })", "tools.pi.find({ pattern, path?, limit? })", "tools.pi.ls({ path?, limit? })", "tools.pi.webfetch({ url, format?, timeout? })", ]; const FFF_SIGNATURES = [ "tools.fff.grep({ pattern, path?, mode?, smartCase?, glob?, maxMatchesPerFile?, context?, classifyDefinitions?, limit? })", "tools.fff.fileSearch({ query, path?, limit? })", "tools.fff.multiGrep({ patterns, path?, limit? })", "tools.fff.recentFiles({ path?, limit?, minFrecencyScore?, includeUntracked? })", "tools.fff.searchThenGrep({ pathQuery, contentQuery, path?, maxFiles?, limit? 
})", ]; const META_APIS = ["tools.list()", "tools.schema(name)", "tools.definitions()"]; export function createBuiltinToolset(cwd: string) { return { read: createCustomReadTool(cwd) as BuiltinTool, edit: createEditTool(cwd) as BuiltinTool, write: createWriteTool(cwd) as BuiltinTool, grep: createGrepTool(cwd) as BuiltinTool, find: createFindTool(cwd) as BuiltinTool, ls: createLsTool(cwd) as BuiltinTool, bash: createWhitelistedBashTool(cwd) as BuiltinTool, webfetch: webfetchTool, }; } export function buildCodemodeApiPrompt(): string { return [ "## Available APIs", "", "### pi tools", ...PI_SIGNATURES, "", "### fff tools", ...FFF_SIGNATURES, "", "### Discovery", ...META_APIS, "", "All pi and fff tools return plain text strings. Other tools (MCP, OpenAPI, GraphQL) return objects.", "If the user mentions an unknown tool, call tools.list() to get a list of available tools", "Before using any unfamiliar tool, call tools.schema('tool.name') to get its exact parameter and return types.", "Dynamically loaded tools are namespaced: tools.openapi.petstore.listPets, tools.mcp.myServer.search.", "Prefer one codemode call; batch work with JS and Promise.all.", "", "### Bash restrictions", `tools.pi.bash only allows: ${[...BASH_COMMAND_WHITELIST].sort().join(", ")}.`, "Everything else is blocked — use the dedicated tools above instead.", ].join("\n"); } ================================================ FILE: agents/pi/extensions/pi-codemode/src/codemode.ts ================================================ import { Type } from "@sinclair/typebox"; import { Text } from "@mariozechner/pi-tui"; import type { ExtensionContext, Theme, ToolDefinition } from "@mariozechner/pi-coding-agent"; import { buildCodemodeApiPrompt } from "./builtins.js"; import { getExecutor } from "./executor-cache.js"; import { renderCodemodeCall, renderCodemodeResult } from "./render.js"; import { runCodemode } from "./runtime.js"; import { formatTraceForAgent, summarizeTraceForContext } from "./trace.js"; import 
type { CodemodeResultDetails } from "./types.js"; import { buildPromptGuidelines, stripCodeFences } from "./util.js"; const codemodeSchema = Type.Object({ code: Type.String({ description: "JavaScript code with tools.pi.* access" }), }); export function createCodemodeTool(): ToolDefinition { return { name: "codemode", label: "codemode", description: "Execute JavaScript in a secure sandbox with access to pi filesystem tools, fff search tools, and dynamically loaded executor tools (MCP, OpenAPI, GraphQL).", promptSnippet: buildCodemodeApiPrompt(), promptGuidelines: buildPromptGuidelines(), parameters: codemodeSchema, async execute( _toolCallId: string, params: { code: string }, signal: AbortSignal | undefined, _onUpdate: unknown, ctx: ExtensionContext, ) { const code = stripCodeFences(params.code); const executor = await getExecutor(ctx.cwd); const result = await runCodemode({ code, cwd: ctx.cwd, executor, signal, }); const { text, images } = formatTraceForAgent(result.trace, result.value, result.logs); return { content: [ { type: "text", text }, ...images.map((img) => ({ type: "image" as const, data: img.data, mimeType: img.mimeType })), ], details: { trace: result.trace, value: result.value, logs: result.logs, summary: summarizeTraceForContext(result.trace), }, }; }, renderCall(args: { code?: string }, theme: Theme) { return renderCodemodeCall(args.code ?? 
"", theme); }, renderResult(result, options, theme) { const trace = result.details?.trace; if (!trace) return new Text(theme.fg("error", "Missing codemode trace"), 0, 0); if (options.isPartial) return new Text(theme.fg("warning", "Executing..."), 0, 0); return renderCodemodeResult(trace, options.expanded, theme); }, }; } ================================================ FILE: agents/pi/extensions/pi-codemode/src/executor-cache.ts ================================================ import { createExecutor, type Executor } from "@executor-js/sdk"; import { mcpPlugin } from "@executor-js/plugin-mcp"; import { openApiPlugin } from "@executor-js/plugin-openapi"; import { graphqlPlugin } from "@executor-js/plugin-graphql"; import { piPlugin } from "./pi-plugin.js"; import { fffPlugin } from "./fff-plugin.js"; import { getExecutorConfigPath, loadSourcesFromConfig } from "./source-config.js"; import { hydrateExecutorSources } from "./source-hydrate.js"; import { stat } from "node:fs/promises"; type CachedExecutor = { executor: Executor; mtimeMs: number; }; const cache = new Map(); const configMtime = async (): Promise => { try { return (await stat(getExecutorConfigPath())).mtimeMs; } catch { return 0; } }; export const getExecutor = async (cwd: string): Promise => { const mtimeMs = await configMtime(); const cached = cache.get(cwd); if (cached && cached.mtimeMs === mtimeMs) return cached.executor; if (cached) { await cached.executor.close(); cache.delete(cwd); } const loaded = await loadSourcesFromConfig(); const executor = await createExecutor({ scope: { name: "pi-codemode-" + cwd }, plugins: [ piPlugin(cwd), fffPlugin(cwd), mcpPlugin(), openApiPlugin(), graphqlPlugin(), ] as const, }); await hydrateExecutorSources(executor, loaded.sources, { configPath: loaded.configPath, unsupported: loaded.unsupported, }); cache.set(cwd, { executor, mtimeMs: loaded.mtimeMs }); return executor; }; export const disposeExecutor = async (cwd: string): Promise => { const cached = 
cache.get(cwd); if (!cached) return; await cached.executor.close(); cache.delete(cwd); }; export const disposeAllExecutors = async (): Promise => { await Promise.allSettled(Array.from(cache.values()).map((entry) => entry.executor.close())); cache.clear(); }; ================================================ FILE: agents/pi/extensions/pi-codemode/src/fff-plugin.ts ================================================ import { definePlugin, ToolRegistration, ToolId, ToolInvocationResult, type PluginContext, } from "@executor-js/sdk"; import { createFffBuiltinToolset, initFinder } from "./fff.js"; import { fffToolNames, type FffToolName } from "./types.js"; import { formatError } from "./util.js"; export const fffPlugin = (cwd: string) => definePlugin({ key: "fff", init: async (ctx: PluginContext) => { // Eagerly initialize finder on plugin load await initFinder(cwd); const tools = createFffBuiltinToolset(cwd); // Register fff.* SIMD-accelerated search tools await ctx.tools.registerInvoker("fff", { invoke: async (toolId: string, args: unknown, _options: unknown) => { const name = toolId.split(".").pop() as FffToolName | undefined; if (!name || !(name in tools)) { return new ToolInvocationResult({ data: null, error: "Unknown fff tool: " + toolId }); } const tool = tools[name]; try { const data = await tool.execute(toolId, args ?? {}, undefined, () => {}); return new ToolInvocationResult({ data, error: null }); } catch (cause) { return new ToolInvocationResult({ data: null, error: formatError(cause) }); } }, }); await ctx.tools.register( fffToolNames.map((name) => { const tool = tools[name]; return new ToolRegistration({ id: ToolId.make("fff." 
+ name), pluginKey: "fff", sourceId: "pi", name, description: tool.description, inputSchema: tool.parameters as Record, }); }), ); return { extension: {} }; }, }); ================================================ FILE: agents/pi/extensions/pi-codemode/src/fff.ts ================================================ import { FileFinder } from "@ff-labs/fff-node"; import { Type } from "@sinclair/typebox"; import { resolve } from "node:path"; import type { BuiltinTool } from "./builtins.js"; import type { FffToolName } from "./types.js"; // Global instance cache const finderCache = new Map(); async function getFinder(cwd: string, searchPath?: string): Promise { const basePath = resolve(cwd, searchPath || "."); if (!finderCache.has(basePath)) { const result = FileFinder.create({ basePath }); if (!result.ok) throw new Error(result.error || "unknown"); finderCache.set(basePath, result.value); result.value.waitForScan(5000).catch(() => console.log("fff scan timeout")); } return finderCache.get(basePath)!; } // Eagerly initialize finder for a path (call on session start) export async function initFinder(cwd: string, searchPath?: string): Promise { await getFinder(cwd, searchPath); } export function destroyAllFinders(): void { for (const f of finderCache.values()) f.destroy(); finderCache.clear(); } // Grep tool export const grepTool: BuiltinTool = { name: "grep", description: "Search file contents using fff (SIMD-accelerated). 
Use as tools.fff.grep()", parameters: Type.Object({ pattern: Type.String(), path: Type.Optional(Type.String()), mode: Type.Optional( Type.Union([Type.Literal("plain"), Type.Literal("regex"), Type.Literal("fuzzy")]), ), smartCase: Type.Optional(Type.Boolean()), glob: Type.Optional(Type.String()), maxMatchesPerFile: Type.Optional(Type.Number()), context: Type.Optional(Type.Number()), classifyDefinitions: Type.Optional(Type.Boolean()), limit: Type.Optional(Type.Number()), }), execute: async (_id: string, args: any, cwd: string) => { const finder = await getFinder(cwd, args.path); const query = args.glob ? args.glob + " " + args.pattern : args.pattern; const res = finder.grep(query, { mode: args.mode || "plain", maxFileSize: args.maxFileSize || 0, maxMatchesPerFile: args.maxMatchesPerFile || 0, smartCase: args.smartCase ?? true, beforeContext: args.context || 0, afterContext: args.context || 0, classifyDefinitions: args.classifyDefinitions || false, } as any); if (!res.ok) throw new Error(res.error); const items = args.limit ? res.value.items.slice(0, args.limit) : res.value.items; return { matches: items.map((m: any) => ({ path: m.relativePath, line: m.lineNumber, column: m.col, content: m.lineContent, matchRanges: m.matchRanges, isDefinition: m.isDefinition ?? false, })), totalMatched: res.value.totalMatched, totalFilesSearched: res.value.totalFilesSearched, nextCursor: res.value.nextCursor, }; }, }; // File search tool export const fileSearchTool: BuiltinTool = { name: "fileSearch", description: "Fuzzy search for files by path. 
Use as tools.fff.fileSearch()", parameters: Type.Object({ query: Type.String(), path: Type.Optional(Type.String()), limit: Type.Optional(Type.Number()), }), execute: async (_id: string, args: any, cwd: string) => { const finder = await getFinder(cwd, args.path); const res = finder.fileSearch(args.query, { pageSize: args.limit || 20 }); if (!res.ok) throw new Error(res.error); return { files: res.value.items.map((item: any) => ({ path: item.relativePath, name: item.fileName, gitStatus: item.gitStatus, frecencyScore: item.totalFrecencyScore, })), }; }, }; // Multi-pattern grep export const multiGrepTool: BuiltinTool = { name: "multiGrep", description: "Multi-pattern search (OR logic). Use as tools.fff.multiGrep()", parameters: Type.Object({ patterns: Type.Array(Type.String()), path: Type.Optional(Type.String()), limit: Type.Optional(Type.Number()), }), execute: async (_id: string, args: any, cwd: string) => { const finder = await getFinder(cwd, args.path); const res = finder.multiGrep({ patterns: args.patterns, smartCase: true, }); if (!res.ok) throw new Error(res.error); const items = args.limit ? res.value.items.slice(0, args.limit) : res.value.items; return { matches: items.map((m: any) => ({ path: m.relativePath, line: m.lineNumber, content: m.lineContent, })), }; }, }; // Recent files export const recentFilesTool: BuiltinTool = { name: "recentFiles", description: "Get recently accessed files (frecency). 
Use as tools.fff.recentFiles()", parameters: Type.Object({ path: Type.Optional(Type.String()), limit: Type.Optional(Type.Number()), minFrecencyScore: Type.Optional(Type.Number()), includeUntracked: Type.Optional(Type.Boolean()), }), execute: async (_id: string, args: any, cwd: string) => { const finder = await getFinder(cwd, args.path); const res = finder.fileSearch("", { pageSize: (args.limit || 20) * 2 }); if (!res.ok) throw new Error(res.error); let files = res.value.items.map((item: any, i: number) => ({ path: item.relativePath, name: item.fileName, gitStatus: item.gitStatus, frecencyScore: res.value.scores[i]?.baseScore || 0, })); if (args.minFrecencyScore) files = files.filter((f: any) => f.frecencyScore >= args.minFrecencyScore); if (args.includeUntracked === false) files = files.filter((f: any) => f.gitStatus !== "??"); if (args.limit) files = files.slice(0, args.limit); return { files }; }, }; // Search then grep export const searchThenGrepTool: BuiltinTool = { name: "searchThenGrep", description: "Fuzzy search files then grep within. Use as tools.fff.searchThenGrep()", parameters: Type.Object({ pathQuery: Type.String(), contentQuery: Type.String(), path: Type.Optional(Type.String()), maxFiles: Type.Optional(Type.Number()), limit: Type.Optional(Type.Number()), }), execute: async (_id: string, args: any, cwd: string) => { const finder = await getFinder(cwd, args.path); const fileRes = finder.fileSearch(args.pathQuery, { pageSize: args.maxFiles || 50 }); if (!fileRes.ok) throw new Error(fileRes.error); if (fileRes.value.items.length === 0) return { matches: [], fileMatches: 0 }; const paths = fileRes.value.items.map((item: any) => item.relativePath); const res = finder.grep(args.contentQuery + " " + paths.join(" "), { mode: "plain" }); if (!res.ok) throw new Error(res.error); const items = args.limit ? 
res.value.items.slice(0, args.limit) : res.value.items; return { matches: items.map((m: any) => ({ path: m.relativePath, line: m.lineNumber, content: m.lineContent, })), fileMatches: paths.length, totalMatched: res.value.totalMatched, }; }, }; export type FffBuiltinToolset = Record; export function createFffBuiltinToolset(cwd: string): FffBuiltinToolset { const withCwd = (tool: BuiltinTool): BuiltinTool => ({ ...tool, execute: async (_toolCallId: string, args: any, _signal: any, _onUpdate: any) => tool.execute(_toolCallId, args, cwd), }); return { grep: withCwd(grepTool), fileSearch: withCwd(fileSearchTool), multiGrep: withCwd(multiGrepTool), recentFiles: withCwd(recentFilesTool), searchThenGrep: withCwd(searchThenGrepTool), }; } ================================================ FILE: agents/pi/extensions/pi-codemode/src/jj-plugin.ts ================================================ import { definePlugin, ToolRegistration, ToolId, ToolInvocationResult, type PluginContext, } from "@executor-js/sdk"; import { spawn } from "node:child_process"; import { formatError } from "./util.js"; const run = async (args: string[], cwd: string, signal?: AbortSignal) => await new Promise((resolve, reject) => { const child = spawn("jj", args, { cwd, signal, stdio: ["ignore", "pipe", "pipe"] }); let stdout = ""; let stderr = ""; child.stdout.on("data", (d) => (stdout += d.toString())); child.stderr.on("data", (d) => (stderr += d.toString())); child.on("error", reject); child.on("close", (code) => resolve({ code: code ?? 0, stdout, stderr, command: ["jj", ...args] }), ); }); const summarize = (r: any) => [ "$ " + r.command.join(" "), r.stdout?.trim(), r.stderr?.trim() ? 
"stderr: " + r.stderr.trim() : "", "exit " + r.code, ] .filter(Boolean) .join(" "); const text = (s: string) => [{ type: "text", text: s }]; export const jjPlugin = (cwd: string) => definePlugin({ key: "jj", init: async (ctx: PluginContext) => { const tools = { status: { description: "Show jj status", parameters: { type: "object", properties: {}, additionalProperties: false }, execute: async () => { const r = await run(["status"], cwd); return { content: text(summarize(r)), details: { result: r } }; }, }, diff: { description: "Show jj diff", parameters: { type: "object", properties: {}, additionalProperties: false }, execute: async () => { const r = await run(["diff"], cwd); return { content: text(summarize(r)), details: { result: r } }; }, }, log: { description: "Show jj log", parameters: { type: "object", properties: {}, additionalProperties: false }, execute: async () => { const r = await run(["log"], cwd); return { content: text(summarize(r)), details: { result: r } }; }, }, new: { description: "Create a new jj change", parameters: { type: "object", properties: {}, additionalProperties: false }, execute: async () => { const r = await run(["new"], cwd); return { content: text(summarize(r)), details: { result: r } }; }, }, describe: { description: "Describe the current jj change", parameters: { type: "object", properties: { description: { type: "string" } }, required: ["description"], additionalProperties: false, }, execute: async (_id: string, args: any) => { const description = String(args?.description ?? ""); if (!description) throw new Error("Missing description"); const r = await run(["describe", "-m", description], cwd); return { content: text(summarize(r)), details: { result: r } }; }, }, commit: { description: "Commit with jj", parameters: { type: "object", properties: { message: { type: "string" } }, required: ["message"], additionalProperties: false, }, execute: async (_id: string, args: any) => { const message = String(args?.message ?? 
""); if (!message) throw new Error("Missing message"); const r = await run(["commit", "-m", message], cwd); return { content: text(summarize(r)), details: { result: r } }; }, }, } as const; await ctx.tools.registerInvoker("jj", { invoke: async (toolId: string, args: unknown) => { const name = toolId.split(".").pop() as keyof typeof tools | undefined; if (!name || !(name in tools)) return new ToolInvocationResult({ data: null, error: "Unknown jj tool: " + toolId }); try { return new ToolInvocationResult({ data: await tools[name].execute(toolId, args ?? {}), error: null, }); } catch (cause) { return new ToolInvocationResult({ data: null, error: formatError(cause) }); } }, }); await ctx.tools.register( Object.entries(tools).map( ([name, tool]) => new ToolRegistration({ id: ToolId.make("jj." + name), pluginKey: "jj", sourceId: "pi", name, description: tool.description, inputSchema: tool.parameters as Record, }), ), ); return { extension: {} }; }, }); ================================================ FILE: agents/pi/extensions/pi-codemode/src/npm-plugin.ts ================================================ import { definePlugin, ToolRegistration, ToolId, ToolInvocationResult, type PluginContext, } from "@executor-js/sdk"; import { spawn } from "node:child_process"; import { promises as fs } from "node:fs"; import { join } from "node:path"; import { formatError } from "./util.js"; const readMaybe = async (path: string) => { try { return await fs.readFile(path, "utf8"); } catch { return null; } }; const pmCache = new Map(); const detectPackageManager = async (cwd: string) => { const cached = pmCache.get(cwd); if (cached) return cached; let result: { bin: string; args: readonly string[] }; if ((await readMaybe(join(cwd, "pnpm-lock.yaml"))) !== null) result = { bin: "pnpm", args: ["add"] as const }; else if ((await readMaybe(join(cwd, "bun.lockb"))) !== null) result = { bin: "bun", args: ["add"] as const }; else if ((await readMaybe(join(cwd, "bun.lock"))) !== null) result = { 
bin: "bun", args: ["add"] as const }; else if ((await readMaybe(join(cwd, "package-lock.json"))) !== null) result = { bin: "npm", args: ["install"] as const }; else result = { bin: "npm", args: ["install"] as const }; pmCache.set(cwd, result); return result; }; const run = async (cmd: string, args: string[], cwd: string, signal?: AbortSignal) => await new Promise((resolve, reject) => { const child = spawn(cmd, args, { cwd, signal, stdio: ["ignore", "pipe", "pipe"] }); let stdout = ""; let stderr = ""; child.stdout.on("data", (d) => (stdout += d.toString())); child.stderr.on("data", (d) => (stderr += d.toString())); child.on("error", reject); child.on("close", (code) => resolve({ code: code ?? 0, stdout, stderr, command: [cmd, ...args] }), ); }); const summarize = (r: any) => [ "$ " + r.command.join(" "), r.stdout?.trim(), r.stderr?.trim() ? "stderr: " + r.stderr.trim() : "", "exit " + r.code, ] .filter(Boolean) .join(" "); const text = (s: string) => [{ type: "text", text: s }]; export const npmPlugin = (cwd: string) => definePlugin({ key: "npm", init: async (ctx: PluginContext) => { const tools = { run: { description: "Run npm scripts", parameters: { type: "object", properties: { name: { type: "string" } }, required: ["name"], additionalProperties: false, }, execute: async (_id: string, args: any) => { const name = String(args?.name ?? ""); if (!name) throw new Error("Missing script name"); const r = await run("npm", ["run", name], cwd); return { content: text(summarize(r)), details: { result: r } }; }, }, install: { description: "Install packages with lockfile-aware package manager", parameters: { type: "object", properties: { packages: { type: "array", items: { type: "string" } } }, required: ["packages"], additionalProperties: false, }, execute: async (_id: string, args: any) => { const packages = Array.isArray(args?.packages) ? 
args.packages.map(String).filter(Boolean) : []; if (!packages.length) throw new Error("Missing packages"); const pm = await detectPackageManager(cwd); const r = await run(pm.bin, [...pm.args, ...packages], cwd); return { content: text(summarize(r)), details: { result: r, packageManager: pm.bin } }; }, }, } as const; await ctx.tools.registerInvoker("npm", { invoke: async (toolId: string, args: unknown) => { const name = toolId.split(".").pop() as keyof typeof tools | undefined; if (!name || !(name in tools)) return new ToolInvocationResult({ data: null, error: "Unknown npm tool: " + toolId }); try { return new ToolInvocationResult({ data: await tools[name].execute(toolId, args ?? {}), error: null, }); } catch (cause) { return new ToolInvocationResult({ data: null, error: formatError(cause) }); } }, }); await ctx.tools.register( Object.entries(tools).map( ([name, tool]) => new ToolRegistration({ id: ToolId.make("npm." + name), pluginKey: "npm", sourceId: "pi", name, description: tool.description, inputSchema: tool.parameters as Record, }), ), ); return { extension: {} }; }, }); ================================================ FILE: agents/pi/extensions/pi-codemode/src/pi-plugin.ts ================================================ import { definePlugin, ToolRegistration, ToolId, ToolInvocationResult, type PluginContext, } from "@executor-js/sdk"; import { createBuiltinToolset } from "./builtins.js"; import { builtinToolNames, type BuiltinToolName } from "./types.js"; import { formatError } from "./util.js"; export const piPlugin = (cwd: string) => definePlugin({ key: "pi", init: async (ctx: PluginContext) => { const tools = createBuiltinToolset(cwd); await ctx.tools.registerInvoker("pi", { invoke: async (toolId: string, args: unknown) => { const name = toolId.split(".").pop() as BuiltinToolName | undefined; if (!name || !(name in tools)) { return new ToolInvocationResult({ data: null, error: `Unknown pi tool: ${toolId}` }); } const tool = tools[name]; try { const 
result = await tool.execute(toolId, args ?? {}, undefined, () => {}); return new ToolInvocationResult({ data: result, error: null }); } catch (cause) { return new ToolInvocationResult({ data: null, error: formatError(cause) }); } }, }); await ctx.tools.register( builtinToolNames.map((name) => { const tool = tools[name]; return new ToolRegistration({ id: ToolId.make(`pi.${name}`), pluginKey: "pi", sourceId: "pi", name, description: tool.description, inputSchema: tool.parameters as Record, }); }), ); return { extension: {} }; }, }); ================================================ FILE: agents/pi/extensions/pi-codemode/src/read.ts ================================================ import { access as fsAccess, readFile as fsReadFile } from "fs/promises"; import { constants } from "fs"; import { extname } from "path"; import type { ImageContent, TextContent } from "@mariozechner/pi-ai"; import { Type } from "@sinclair/typebox"; // Much higher defaults than the built-in read tool (2000 lines / 50KB) export const CUSTOM_MAX_LINES = 100_000; export const CUSTOM_MAX_BYTES = 10 * 1024 * 1024; // 10MB const IMAGE_MIME_TYPES: Record = { ".jpg": "image/jpeg", ".jpeg": "image/jpeg", ".png": "image/png", ".gif": "image/gif", ".webp": "image/webp", }; function isImageFile(filePath: string): string | null { const ext = extname(filePath).toLowerCase(); return IMAGE_MIME_TYPES[ext] || null; } function formatSize(bytes: number): string { if (bytes < 1024) return `${bytes}B`; if (bytes < 1024 * 1024) return `${(bytes / 1024).toFixed(1)}KB`; return `${(bytes / (1024 * 1024)).toFixed(1)}MB`; } const readSchema = Type.Object({ path: Type.String({ description: "Path to the file to read (relative or absolute)" }), offset: Type.Optional( Type.Number({ description: "Line number to start reading from (1-indexed)" }), ), limit: Type.Optional(Type.Number({ description: "Maximum number of lines to read" })), }); export interface ReadToolOptions { maxLines?: number; maxBytes?: number; } export 
function createCustomReadTool(cwd: string, options?: ReadToolOptions) { const maxLines = options?.maxLines ?? CUSTOM_MAX_LINES; const maxBytes = options?.maxBytes ?? CUSTOM_MAX_BYTES; return { name: "read", label: "read", description: `Read the contents of a file. Supports text files and images (jpg, png, gif, webp). Images are sent as attachments. For text files, output is truncated to ${maxLines.toLocaleString()} lines or ${formatSize(maxBytes)} (whichever is hit first). Use offset/limit for large files. When you need the full file, continue with offset until complete.`, parameters: readSchema, execute: async ( _toolCallId: string, { path, offset, limit }: { path: string; offset?: number; limit?: number }, signal?: AbortSignal, ): Promise<{ content: (TextContent | ImageContent)[] }> => { if (signal?.aborted) throw new Error("Operation aborted"); const absolutePath = path.startsWith("/") ? path : `${cwd}/${path}`; // Check accessibility try { await fsAccess(absolutePath, constants.R_OK); } catch { throw new Error(`File not readable: ${path}`); } const mimeType = isImageFile(absolutePath); if (mimeType) { // Read image as base64 const buffer = await fsReadFile(absolutePath); const base64 = buffer.toString("base64"); return { content: [ { type: "text", text: `Read image file [${mimeType}]` }, { type: "image", data: base64, mimeType }, ], }; } // Read text file const buffer = await fsReadFile(absolutePath); const textContent = buffer.toString("utf-8"); const allLines = textContent.split("\n"); const totalFileLines = allLines.length; // Apply offset (1-indexed) const startLine = offset ? 
Math.max(0, offset - 1) : 0; const startLineDisplay = startLine + 1; if (startLine >= allLines.length) { throw new Error(`Offset ${offset} is beyond end of file (${allLines.length} lines total)`); } // Slice content based on offset/limit let selectedLines: string[]; let userLimitApplied = false; if (limit !== undefined) { const endLine = Math.min(startLine + limit, allLines.length); selectedLines = allLines.slice(startLine, endLine); userLimitApplied = endLine - startLine < allLines.length - startLine; } else { selectedLines = allLines.slice(startLine); } // Apply our own truncation (much higher limits) const result = truncateHead(selectedLines.join("\n"), { maxLines, maxBytes }); let outputText: string; if (result.firstLineExceedsLimit) { const firstLineSize = formatSize(Buffer.byteLength(selectedLines[0] || "", "utf-8")); outputText = `[Line ${startLineDisplay} is ${firstLineSize}, exceeds ${formatSize(maxBytes)} limit. Use bash: sed -n '${startLineDisplay}p' ${path} | head -c ${maxBytes}]`; } else if (result.truncated) { const endLineDisplay = startLineDisplay + result.outputLines - 1; const nextOffset = endLineDisplay + 1; outputText = result.content; if (result.truncatedBy === "lines") { outputText += `\n\n[Showing lines ${startLineDisplay}-${endLineDisplay} of ${totalFileLines}. Use offset=${nextOffset} to continue.]`; } else { outputText += `\n\n[Showing lines ${startLineDisplay}-${endLineDisplay} of ${totalFileLines} (${formatSize(maxBytes)} limit). Use offset=${nextOffset} to continue.]`; } } else if (userLimitApplied) { const endLine = startLine + (limit ?? 0); const remaining = totalFileLines - endLine; const nextOffset = endLine + 1; outputText = result.content; outputText += `\n\n[${remaining} more lines in file. 
Use offset=${nextOffset} to continue.]`; } else { outputText = result.content; } return { content: [{ type: "text", text: outputText }], }; }, }; } interface TruncationResult { content: string; truncated: boolean; truncatedBy: "lines" | "bytes" | null; outputLines: number; firstLineExceedsLimit: boolean; } interface TruncationOptions { maxLines: number; maxBytes: number; } function truncateHead(content: string, options: TruncationOptions): TruncationResult { const { maxLines, maxBytes } = options; const totalBytes = Buffer.byteLength(content, "utf-8"); const lines = content.split("\n"); const totalLines = lines.length; if (totalLines <= maxLines && totalBytes <= maxBytes) { return { content, truncated: false, truncatedBy: null, outputLines: totalLines, firstLineExceedsLimit: false, }; } // Check if first line alone exceeds limit const firstLineBytes = Buffer.byteLength(lines[0], "utf-8"); if (firstLineBytes > maxBytes) { return { content: "", truncated: true, truncatedBy: "bytes", outputLines: 0, firstLineExceedsLimit: true, }; } const outputLinesArr: string[] = []; let outputBytesCount = 0; let truncatedBy: "lines" | "bytes" = "lines"; for (let i = 0; i < lines.length && i < maxLines; i++) { const line = lines[i]; const lineBytes = Buffer.byteLength(line, "utf-8") + (i > 0 ? 
1 : 0); if (outputBytesCount + lineBytes > maxBytes) { truncatedBy = "bytes"; break; } outputLinesArr.push(line); outputBytesCount += lineBytes; } if (outputLinesArr.length >= maxLines && outputBytesCount <= maxBytes) { truncatedBy = "lines"; } return { content: outputLinesArr.join("\n"), truncated: true, truncatedBy, outputLines: outputLinesArr.length, firstLineExceedsLimit: false, }; } ================================================ FILE: agents/pi/extensions/pi-codemode/src/render.ts ================================================ import { Text } from "@mariozechner/pi-tui"; import type { Theme } from "@mariozechner/pi-coding-agent"; import type { CodemodeTrace, TraceStep } from "./types.js"; const truncate = (value: string, max: number): string => (value.length <= max ? value : `${value.slice(0, max - 1)}…`); const formatDuration = (ms: number): string => { if (ms < 1000) return `${ms}ms`; return `${(ms / 1000).toFixed(ms < 10_000 ? 1 : 0)}s`; }; const summarizeStep = (step: TraceStep): string => { if (step.error) return truncate(step.error, 80); if (!step.output?.text) return ""; return truncate(step.output.text.split(/\r?\n/)[0] ?? "", 80); }; export function renderCodemodeCall(code: string, theme: Theme): Text { const lines = code.trim().split(/\r?\n/); const suffix = lines.length > 1 ? theme.fg("muted", ` (${lines.length} lines)`) : ""; const shown = lines.slice(0, 12).map((line) => theme.fg("accent", line)).join("\n"); let content = `${theme.fg("toolTitle", theme.bold("codemode"))}${suffix}\n${shown}`; if (lines.length > 12) content += theme.fg("muted", `\n… (${lines.length - 12} more lines)`); return new Text(content, 0, 0); } export function renderCodemodeResult(trace: CodemodeTrace, expanded: boolean, theme: Theme): Text { const duration = trace.endedAt ? formatDuration(trace.endedAt - trace.startedAt) : "running"; const isError = trace.status === "error" || trace.steps.some((step) => step.status === "error"); const statusColor = isError ? 
"error" : trace.status === "ok" ? "success" : "accent"; if (!expanded) { const lines: string[] = [ `${theme.fg("toolTitle", theme.bold("codemode"))} ${theme.fg(statusColor, trace.status)} ${theme.fg("muted", duration)}`, ]; for (const step of trace.steps) { const icon = step.status === "error" ? "✗" : step.status === "ok" ? "✓" : "○"; const iconColor = step.status === "error" ? "error" : step.status === "ok" ? "success" : "muted"; const summary = summarizeStep(step); lines.push(`${theme.fg(iconColor, icon)} ${step.label}${summary ? ` — ${theme.fg("muted", summary)}` : ""}`); } if (trace.value !== undefined) { const value = typeof trace.value === "string" ? trace.value : JSON.stringify(trace.value); lines.push(`→ ${theme.fg("success", truncate(value, 120))}`); } if (trace.error) lines.push(`→ ${theme.fg("error", truncate(trace.error, 120))}`); return new Text(lines.join("\n"), 0, 0); } const lines: string[] = [ theme.fg("toolTitle", theme.bold("codemode execution")), `status: ${theme.fg(statusColor, trace.status)} · ${duration} · ${trace.steps.length} step${trace.steps.length === 1 ? "" : "s"}`, "", theme.fg("muted", "// executed code:"), ...trace.code.trim().split(/\r?\n/).map((line) => theme.fg("accent", line)), ]; if (trace.steps.length > 0) { lines.push("", theme.bold("steps:")); for (const step of trace.steps) { const color = step.status === "error" ? "error" : step.status === "ok" ? "success" : "warning"; lines.push(`${theme.fg(color, step.status === "error" ? "✗" : "✓")} ${step.label}`); lines.push(theme.fg("muted", ` input: ${truncate(typeof step.input === "string" ? 
step.input : JSON.stringify(step.input), 240)}`)); if (step.output?.text) { for (const line of step.output.text.split(/\r?\n/).slice(0, 100)) lines.push(theme.fg("toolOutput", ` ${line}`)); } if (step.error) lines.push(theme.fg("error", ` ${step.error}`)); } } if (trace.logs.length > 0) { lines.push("", theme.bold(`logs (${trace.logs.length}):`)); for (const log of trace.logs.slice(0, 200)) lines.push(theme.fg("muted", ` ${log}`)); if (trace.logs.length > 200) lines.push(theme.fg("muted", ` … (${trace.logs.length - 200} more)`)); } if (trace.value !== undefined) { lines.push("", theme.bold("result:")); const value = typeof trace.value === "string" ? trace.value : JSON.stringify(trace.value, null, 2); for (const line of value.split(/\r?\n/).slice(0, 200)) lines.push(theme.fg("success", line)); } if (trace.error) { lines.push("", theme.bold("error:"), theme.fg("error", trace.error)); } return new Text(lines.join("\n"), 0, 0); } ================================================ FILE: agents/pi/extensions/pi-codemode/src/runtime.ts ================================================ import type { Executor } from "@executor-js/sdk"; import { acquireSandbox, type SandboxDelegates } from "./sandbox-cache.js"; import { CodemodeTraceRecorder } from "./trace.js"; import type { ImageContent } from "@mariozechner/pi-ai"; import type { CodemodeTrace, ToolResultSnapshot } from "./types.js"; const DEFAULT_TIMEOUT_MS = 30_000; const MAX_CODE_SIZE = 100_000; type RuntimeOptions = { code: string; cwd: string; executor: Executor; timeoutMs?: number; signal?: AbortSignal; onUpdate?: () => void; }; export type CodemodeExecutionResult = { value?: unknown; trace: CodemodeTrace; logs: string[]; }; const formatError = (cause: unknown): string => { if (cause instanceof Error) { const message = cause.message.trim(); return message.length > 0 ? 
message : cause.name; } if (typeof cause === "string") return cause; if (typeof cause === "object" && cause !== null) { if ("message" in cause && typeof cause.message === "string") { const message = cause.message.trim(); if (message.length > 0) return message; } try { return JSON.stringify(cause); } catch { return String(cause); } } return String(cause); }; const isToolResult = (value: unknown): value is { content: unknown[] } => typeof value === "object" && value !== null && "content" in value && Array.isArray((value as { content?: unknown[] }).content); const extractText = (value: unknown): string => { if (typeof value === "string") return value; if (typeof value === "undefined") return ""; if (value === null) return "null"; if (isToolResult(value)) { return value.content .filter( (entry): entry is { type?: unknown; text?: unknown } => typeof entry === "object" && entry !== null, ) .map((entry) => { if (entry.type === "text" && typeof entry.text === "string") return entry.text; return ""; }) .filter((line) => line.length > 0) .join(" ") .trim(); } return ""; }; const extractImages = (value: unknown): ImageContent[] => { if (!isToolResult(value)) return []; return value.content.filter( (entry): entry is ImageContent => typeof entry === "object" && entry !== null && (entry as Record).type === "image" && typeof (entry as Record).data === "string" && typeof (entry as Record).mimeType === "string", ); }; const toSnapshot = (value: unknown, isError = false): ToolResultSnapshot => ({ value, text: extractText(value), images: extractImages(value), isError, }); const CODE_PREFIX = [ '"use strict";', "", "const __invokeTool = SecureExec.bindings.invokeTool;", "const __emitLog = SecureExec.bindings.emitLog;", "const __emitResult = SecureExec.bindings.emitResult;", 'const __formatArg = (value) => typeof value === "string" ? 
value : JSON.stringify(value);', 'const __formatLine = (args) => args.map(__formatArg).join(" ");', "", "const __makeToolsProxy = (path = []) => new Proxy(() => undefined, {", " get(_target, prop) {", ' if (prop === "then" || typeof prop === "symbol") return undefined;', " return __makeToolsProxy([...path, String(prop)]);", " },", " apply(_target, _thisArg, args) {", ' const toolPath = path.join(".");', ' if (!toolPath) throw new Error("Tool path missing in invocation");', " return Promise.resolve(__invokeTool(toolPath, args[0])).catch((err) => {", ' throw new Error(err?.error || err?.message || "Tool invocation failed");', " });", " },", "});", "const tools = __makeToolsProxy();", "", "const console = {", ' log: (...args) => __emitLog("log", __formatLine(args)),', ' warn: (...args) => __emitLog("warn", __formatLine(args)),', ' error: (...args) => __emitLog("error", __formatLine(args)),', ' info: (...args) => __emitLog("info", __formatLine(args)),', ' debug: (...args) => __emitLog("debug", __formatLine(args)),', "};", "", "(async () => {", "try {", "const value = await (async () => {", ].join("\n"); const CODE_POSTFIX = [ "})();", "__emitResult(value);", "} catch (error) {", 'const message = error && typeof error === "object" ? (error.stack || error.message || String(error)) : String(error);', 'process.stderr.write(message + "\\n");', "process.exitCode = 1;", "}", "})();", ].join("\n"); const buildExecutionSource = (code: string): string => CODE_PREFIX + "\n" + code + "\n" + CODE_POSTFIX; const invokeExecutorApi = async ( executor: Executor, path: string, args: unknown, ): Promise<{ handled: true; data: unknown } | { handled: false }> => { switch (path) { case "list": case "tools.list": return { handled: true, data: await executor.tools.list(args ?? 
{}) }; case "schema": case "tools.schema": return { handled: true, data: await executor.tools.schema(String(args)) }; case "definitions": case "tools.definitions": return { handled: true, data: await executor.tools.definitions() }; case "sources.list": return { handled: true, data: await executor.sources.list() }; case "sources.remove": return { handled: true, data: await executor.sources.remove(String(args)) }; case "sources.refresh": return { handled: true, data: await executor.sources.refresh(String(args)) }; case "sources.detect": return { handled: true, data: await executor.sources.detect(String(args)) }; case "policies.list": return { handled: true, data: await executor.policies.list() }; case "policies.add": return { handled: true, data: await executor.policies.add(args as any) }; case "policies.remove": return { handled: true, data: await executor.policies.remove(String(args)) }; case "secrets.list": return { handled: true, data: await executor.secrets.list() }; case "secrets.resolve": return { handled: true, data: await executor.secrets.resolve(String(args)) }; case "secrets.status": return { handled: true, data: await executor.secrets.status(String(args)) }; case "secrets.set": return { handled: true, data: await executor.secrets.set(args as any) }; case "secrets.remove": return { handled: true, data: await executor.secrets.remove(String(args)) }; case "secrets.addProvider": return { handled: true, data: await executor.secrets.addProvider(args as any) }; case "secrets.providers": return { handled: true, data: await executor.secrets.providers() }; default: return { handled: false }; } }; // --- Tool invocation bridge (host side) --- // Uses structured clone via secure-exec bindings — no JSON serialization. 
const createInvokeTool = (executor: Executor, recorder: CodemodeTraceRecorder, onStep?: () => void) => async (path: unknown, args: unknown): Promise => { const toolPath = String(path); const step = recorder.startStep({ label: `tools.${toolPath}`, toolPath, input: args }); try { const direct = await invokeExecutorApi(executor, toolPath, args); let data: unknown; if (direct.handled) { data = direct.data; } else { const invoked = await executor.tools.invoke(toolPath, args, { onElicitation: "accept-all", }); if (invoked.error != null) { const message = formatError(invoked.error); recorder.finishStep(step, undefined, message); onStep?.(); throw { error: message }; } data = invoked.data; } const snapshot = toSnapshot(data); recorder.finishStep(step, snapshot); onStep?.(); // pi and fff tools return ToolResult objects ({ content: [...] }) — extract text // for backwards compat. Other tools (MCP, OpenAPI, GraphQL) return objects as-is. if ((toolPath.startsWith("pi.") || toolPath.startsWith("fff.")) && isToolResult(data)) { return extractText(data); } return data; } catch (cause) { const message = formatError(cause); recorder.finishStep(step, undefined, message); onStep?.(); throw { error: message }; } }; // --- Sequential per-cwd execution queue --- const executionQueues = new Map>(); export const runCodemode = async (options: RuntimeOptions): Promise => { const key = options.cwd; const prev = executionQueues.get(key); const next = (async (): Promise => { if (prev) { try { await prev; } catch { /* ignore previous execution errors */ } } return _runCodemode(options); })(); executionQueues.set(key, next); try { return await next; } finally { if (executionQueues.get(key) === next) { executionQueues.delete(key); } } }; // --- Core execution --- const noopDelegates: SandboxDelegates = { invokeTool: async () => undefined, emitLog: () => { }, emitResult: () => {}, }; const _runCodemode = async (options: RuntimeOptions): Promise => { const code = options.code; const timeoutMs = 
Math.max(100, options.timeoutMs ?? DEFAULT_TIMEOUT_MS); const recorder = new CodemodeTraceRecorder({ cwd: options.cwd, code }); const logs: string[] = []; if (code.length > MAX_CODE_SIZE) { const message = `Code exceeds ${MAX_CODE_SIZE} bytes`; const trace = recorder.finish({ status: "error", error: message }); return { trace, logs }; } const sandbox = await acquireSandbox(options.cwd); // Wire up delegates for this execution sandbox.delegates.invokeTool = createInvokeTool(options.executor, recorder, options.onUpdate); sandbox.delegates.emitResult = (value: unknown) => { capturedResult = value; }; sandbox.delegates.emitLog = (level: unknown, line: unknown) => { const entry = `[${String(level)}] ${String(line)}`; logs.push(entry); recorder.log(entry); }; let capturedResult: unknown = undefined; try { let aborted = false; let timedOut = false; const source = buildExecutionSource(code); const stdoutDecoder = new TextDecoder(); const stderrDecoder = new TextDecoder(); let stdout = ""; let stderr = ""; const proc = sandbox.kernel.spawn("node", ["-e", source], { cwd: options.cwd, onStdout: (data) => { stdout += stdoutDecoder.decode(data, { stream: true }); }, onStderr: (data) => { stderr += stderrDecoder.decode(data, { stream: true }); }, }); const timeoutId = setTimeout(() => { timedOut = true; proc.kill(9); }, timeoutMs); let abortHandler: (() => void) | undefined; if (options.signal) { abortHandler = () => { aborted = true; proc.kill(9); }; if (options.signal.aborted) abortHandler(); else options.signal.addEventListener("abort", abortHandler, { once: true }); } let exitCode: number; try { exitCode = await proc.wait(); } finally { clearTimeout(timeoutId); if (options.signal && abortHandler) options.signal.removeEventListener("abort", abortHandler); stdout += stdoutDecoder.decode(); stderr += stderrDecoder.decode(); } const value = capturedResult; if (timedOut) { const trace = recorder.finish({ status: "error", error: `Execution timed out after ${timeoutMs}ms`, }); 
return { trace, logs }; } if (aborted || options.signal?.aborted) { const trace = recorder.finish({ status: "aborted", value, error: "Execution aborted" }); return { value, trace, logs }; } if (exitCode !== 0) { const adjustedStderr = stderr.trim(); const error = adjustedStderr || stdout.trim() || `Process exited with code ${exitCode}`; const trace = recorder.finish({ status: "error", value, error }); return { value, trace, logs }; } const trace = recorder.finish({ status: "ok", value }); return { value, trace, logs }; } catch (cause) { const message = formatError(cause); const trace = recorder.finish({ status: options.signal?.aborted ? "aborted" : "error", error: message, }); return { trace, logs }; } finally { // Reset delegates to no-ops so next execution starts clean sandbox.delegates.invokeTool = noopDelegates.invokeTool; sandbox.delegates.emitLog = noopDelegates.emitLog; sandbox.delegates.emitResult = noopDelegates.emitResult; } }; ================================================ FILE: agents/pi/extensions/pi-codemode/src/sandbox-cache.ts ================================================ import { createInMemoryFileSystem, createKernel, createNodeRuntime, type Kernel } from "secure-exec"; // Bound once into secure-exec. Mutable delegates allow per-execution handler swap // without recreating the kernel. 
export type SandboxDelegates = { invokeTool: (path: unknown, args: unknown) => Promise; emitLog: (level: unknown, line: unknown) => void; emitResult: (value: unknown) => void; }; export type SandboxEntry = { kernel: Kernel; delegates: SandboxDelegates; }; const noopDelegates: SandboxDelegates = { invokeTool: async () => undefined, emitLog: () => {}, emitResult: () => {}, }; const cache = new Map(); export const acquireSandbox = async (cwd: string): Promise => { const existing = cache.get(cwd); if (existing) return existing; const delegates: SandboxDelegates = { ...noopDelegates }; // Fixed bindings that forward through mutable delegates const bindings = { invokeTool: (path: unknown, args: unknown) => delegates.invokeTool(path, args), emitLog: (level: unknown, line: unknown) => delegates.emitLog(level, line), emitResult: (value: unknown) => delegates.emitResult(value), }; const kernel = createKernel({ filesystem: createInMemoryFileSystem(), cwd }); await kernel.mount(createNodeRuntime({ bindings })); const entry: SandboxEntry = { kernel, delegates }; cache.set(cwd, entry); return entry; }; export const disposeAll = async (): Promise => { await Promise.allSettled( Array.from(cache.values()).map((entry) => entry.kernel.dispose()), ); cache.clear(); }; ================================================ FILE: agents/pi/extensions/pi-codemode/src/source-config.ts ================================================ import { readFile, stat } from "node:fs/promises"; import { homedir } from "node:os"; import { join } from "node:path"; import { parse, printParseErrorCode, type ParseError } from "jsonc-parser"; const SECRET_REF_PREFIX = "secret-public-ref:"; type UnknownRecord = Record; type HeaderSecretRef = { secretId: string; prefix?: string }; type PluginHeaderValue = string | HeaderSecretRef; export type OpenApiSourceConfig = { kind: "openapi"; spec: string; baseUrl?: string; namespace?: string; headers?: Record; }; export type GraphqlSourceConfig = { kind: "graphql"; 
endpoint: string; introspectionJson?: string; namespace?: string; headers?: Record; }; export type McpRemoteSourceConfig = { kind: "mcp"; transport: "remote"; name: string; endpoint: string; remoteTransport?: "streamable-http" | "sse" | "auto"; namespace?: string; queryParams?: Record; headers?: Record; }; export type McpStdioSourceConfig = { kind: "mcp"; transport: "stdio"; name: string; command: string; args?: string[]; env?: Record; cwd?: string; namespace?: string; }; export type SupportedSourceConfig = | OpenApiSourceConfig | GraphqlSourceConfig | McpRemoteSourceConfig | McpStdioSourceConfig; export type UnsupportedSourceConfig = { index: number; reason: string; value: unknown; kind?: string; }; export type LoadedSourceConfig = { configPath: string; mtimeMs: number; sources: SupportedSourceConfig[]; unsupported: UnsupportedSourceConfig[]; }; const isRecord = (value: unknown): value is UnknownRecord => typeof value === "object" && value !== null && !Array.isArray(value); const parseStringMap = (value: unknown): Record | null => { if (!isRecord(value)) return null; const output: Record = {}; for (const [key, item] of Object.entries(value)) { if (typeof item !== "string") return null; output[key] = item; } return output; }; const parseStringArray = (value: unknown): string[] | null => { if (!Array.isArray(value)) return null; if (value.some((item) => typeof item !== "string")) return null; return [...value] as string[]; }; const parseHeaderValue = (value: unknown): PluginHeaderValue | null => { if (typeof value === "string") { if (value?.startsWith(SECRET_REF_PREFIX)) { return { secretId: value.slice(SECRET_REF_PREFIX.length) }; } return value; } if (!isRecord(value) || typeof value.value !== "string") return null; const prefix = typeof value.prefix === "string" ? value.prefix : undefined; const raw = value.value; if (raw?.startsWith(SECRET_REF_PREFIX)) { const secretId = raw.slice(SECRET_REF_PREFIX.length); return prefix ? 
{ secretId, prefix } : { secretId }; } return prefix ? prefix + raw : raw; }; const parseHeaderMap = (value: unknown): Record | null => { if (!isRecord(value)) return null; const output: Record = {}; for (const [key, item] of Object.entries(value)) { const parsed = parseHeaderValue(item); if (parsed == null) return null; output[key] = parsed; } return output; }; const parseMcpSource = (entry: UnknownRecord): { source?: SupportedSourceConfig; reason?: string } => { const transport = entry.transport; if (transport !== "remote" && transport !== "stdio") { return { reason: "mcp.transport must be \"remote\" or \"stdio\"" }; } if (typeof entry.name !== "string" || entry.name.trim().length === 0) { return { reason: "mcp.name must be a non-empty string" }; } if (entry.namespace !== undefined && typeof entry.namespace !== "string") { return { reason: "mcp.namespace must be a string" }; } if (transport === "remote") { if (typeof entry.endpoint !== "string" || entry.endpoint.trim().length === 0) { return { reason: "mcp(remote).endpoint must be a non-empty string" }; } if ( entry.remoteTransport !== undefined && entry.remoteTransport !== "streamable-http" && entry.remoteTransport !== "sse" && entry.remoteTransport !== "auto" ) { return { reason: "mcp(remote).remoteTransport must be streamable-http|sse|auto" }; } if (entry.queryParams !== undefined && parseStringMap(entry.queryParams) == null) { return { reason: "mcp(remote).queryParams must be a string map" }; } if (entry.headers !== undefined && parseStringMap(entry.headers) == null) { return { reason: "mcp(remote).headers must be a string map" }; } return { source: { kind: "mcp", transport: "remote", name: entry.name, endpoint: entry.endpoint, remoteTransport: entry.remoteTransport, namespace: entry.namespace, queryParams: entry.queryParams as Record | undefined, headers: entry.headers as Record | undefined, }, }; } if (typeof entry.command !== "string" || entry.command.trim().length === 0) { return { reason: 
"mcp(stdio).command must be a non-empty string" }; } if (entry.args !== undefined && parseStringArray(entry.args) == null) { return { reason: "mcp(stdio).args must be a string array" }; } if (entry.env !== undefined && parseStringMap(entry.env) == null) { return { reason: "mcp(stdio).env must be a string map" }; } if (entry.cwd !== undefined && typeof entry.cwd !== "string") { return { reason: "mcp(stdio).cwd must be a string" }; } return { source: { kind: "mcp", transport: "stdio", name: entry.name, command: entry.command, args: entry.args as string[] | undefined, env: entry.env as Record | undefined, cwd: entry.cwd as string | undefined, namespace: entry.namespace, }, }; }; const parseOpenApiSource = (entry: UnknownRecord): { source?: SupportedSourceConfig; reason?: string } => { if (typeof entry.spec !== "string" || entry.spec.trim().length === 0) { return { reason: "openapi.spec must be a non-empty string" }; } if (entry.baseUrl !== undefined && typeof entry.baseUrl !== "string") { return { reason: "openapi.baseUrl must be a string" }; } if (entry.namespace !== undefined && typeof entry.namespace !== "string") { return { reason: "openapi.namespace must be a string" }; } const headers = entry.headers === undefined ? undefined : parseHeaderMap(entry.headers); if (entry.headers !== undefined && headers == null) { return { reason: "openapi.headers must map to string or secret refs" }; } return { source: { kind: "openapi", spec: entry.spec, baseUrl: entry.baseUrl as string | undefined, namespace: entry.namespace as string | undefined, headers: headers ?? 
undefined, }, }; }; const parseGraphqlSource = (entry: UnknownRecord): { source?: SupportedSourceConfig; reason?: string } => { if (typeof entry.endpoint !== "string" || entry.endpoint.trim().length === 0) { return { reason: "graphql.endpoint must be a non-empty string" }; } if (entry.introspectionJson !== undefined && typeof entry.introspectionJson !== "string") { return { reason: "graphql.introspectionJson must be a string" }; } if (entry.namespace !== undefined && typeof entry.namespace !== "string") { return { reason: "graphql.namespace must be a string" }; } const headers = entry.headers === undefined ? undefined : parseHeaderMap(entry.headers); if (entry.headers !== undefined && headers == null) { return { reason: "graphql.headers must map to string or secret refs" }; } return { source: { kind: "graphql", endpoint: entry.endpoint, introspectionJson: entry.introspectionJson as string | undefined, namespace: entry.namespace as string | undefined, headers: headers ?? undefined, }, }; }; const parseErrorsToMessage = (errors: ParseError[]): string => errors.map((error) => "offset " + error.offset + ": " + printParseErrorCode(error.error)).join("; "); export const getExecutorConfigPath = (): string => join(homedir(), ".pi", "agent", "executor.jsonc"); export const loadSourcesFromConfig = async (): Promise => { const configPath = getExecutorConfigPath(); let raw: string; let mtimeMs: number; try { mtimeMs = (await stat(configPath)).mtimeMs; raw = await readFile(configPath, "utf8"); } catch (cause) { const error = cause as NodeJS.ErrnoException; if (error?.code === "ENOENT") { return { configPath, mtimeMs: 0, sources: [], unsupported: [], }; } throw new Error("Failed reading " + configPath + ": " + (error?.message ?? 
String(cause))); } const parseErrors: ParseError[] = []; const parsed = parse(raw, parseErrors); if (parseErrors.length > 0) { throw new Error("Invalid JSONC in " + configPath + ": " + parseErrorsToMessage(parseErrors)); } if (!isRecord(parsed)) { throw new Error("Invalid config at " + configPath + ": root must be an object"); } const rawSources = parsed.sources; if (rawSources === undefined) { return { configPath, mtimeMs, sources: [], unsupported: [] }; } if (!Array.isArray(rawSources)) { throw new Error("Invalid config at " + configPath + ": \"sources\" must be an array"); } const sources: SupportedSourceConfig[] = []; const unsupported: UnsupportedSourceConfig[] = []; for (let index = 0; index < rawSources.length; index++) { const rawSource = rawSources[index]; if (!isRecord(rawSource)) { unsupported.push({ index, reason: "source entry must be an object", value: rawSource }); continue; } const kind = typeof rawSource.kind === "string" ? rawSource.kind : undefined; if (kind === "mcp") { const parsedSource = parseMcpSource(rawSource); if (!parsedSource.source) { unsupported.push({ index, kind, reason: parsedSource.reason ?? "invalid mcp source", value: rawSource }); continue; } sources.push(parsedSource.source); continue; } if (kind === "openapi") { const parsedSource = parseOpenApiSource(rawSource); if (!parsedSource.source) { unsupported.push({ index, kind, reason: parsedSource.reason ?? "invalid openapi source", value: rawSource }); continue; } sources.push(parsedSource.source); continue; } if (kind === "graphql") { const parsedSource = parseGraphqlSource(rawSource); if (!parsedSource.source) { unsupported.push({ index, kind, reason: parsedSource.reason ?? "invalid graphql source", value: rawSource }); continue; } sources.push(parsedSource.source); continue; } unsupported.push({ index, kind, reason: kind ? 
"unsupported source kind: " + kind : "source.kind is required", value: rawSource, }); } return { configPath, mtimeMs, sources, unsupported }; }; ================================================ FILE: agents/pi/extensions/pi-codemode/src/source-hydrate.ts ================================================ import type { Executor } from "@executor-js/sdk"; import type { SupportedSourceConfig, UnsupportedSourceConfig } from "./source-config.js"; type ExecutorWithPlugins = Executor & { mcp?: { addSource: (source: unknown) => Promise }; openapi?: { addSpec: (source: unknown) => Promise }; graphql?: { addSource: (source: unknown) => Promise }; }; type HydrateOptions = { configPath: string; unsupported: UnsupportedSourceConfig[]; }; const sourceKey = (source: SupportedSourceConfig): string => { if (source.namespace) return source.kind + ":" + source.namespace; if (source.kind === "mcp") { if (source.transport === "remote") return "mcp:remote:" + source.endpoint; return "mcp:stdio:" + source.command + ":" + (source.args ?? []).join("\u0000") + ":" + (source.cwd ?? ""); } if (source.kind === "openapi") return "openapi:" + source.spec; return "graphql:" + source.endpoint; }; const summarize = (source: SupportedSourceConfig): string => { if (source.namespace) return source.kind + ":" + source.namespace; if (source.kind === "mcp") return source.transport === "remote" ? 
"mcp:" + source.endpoint : "mcp:" + source.command; if (source.kind === "openapi") return "openapi:" + source.spec; return "graphql:" + source.endpoint; }; export const hydrateExecutorSources = async ( executor: Executor, sources: SupportedSourceConfig[], options: HydrateOptions, ): Promise => { if (options.unsupported.length > 0) { for (const entry of options.unsupported) { console.warn( "[codemode] Skipping unsupported source in " + options.configPath + " (#" + entry.index + "): " + entry.reason, ); } } if (sources.length === 0) return; const ext = executor as ExecutorWithPlugins; const existing = await executor.sources.list(); const existingIds = new Set(existing.map((source: { id: string }) => source.id)); const seen = new Set(); for (const source of sources) { const key = sourceKey(source); if (seen.has(key)) continue; seen.add(key); if (source.namespace && existingIds.has(source.namespace)) continue; if (source.kind === "mcp") { if (!ext.mcp?.addSource) { throw new Error("Missing MCP plugin while hydrating " + summarize(source) + " from " + options.configPath); } await ext.mcp.addSource(source); continue; } if (source.kind === "openapi") { if (!ext.openapi?.addSpec) { throw new Error("Missing OpenAPI plugin while hydrating " + summarize(source) + " from " + options.configPath); } await ext.openapi.addSpec(source); continue; } if (!ext.graphql?.addSource) { throw new Error("Missing GraphQL plugin while hydrating " + summarize(source) + " from " + options.configPath); } await ext.graphql.addSource(source); } }; ================================================ FILE: agents/pi/extensions/pi-codemode/src/trace.ts ================================================ import type { ImageContent } from "@mariozechner/pi-ai"; import type { CodemodeTrace, ToolResultSnapshot, TraceStep } from "./types.js"; const clone = (value: T): T => { if (typeof structuredClone === "function") return structuredClone(value); return JSON.parse(JSON.stringify(value)) as T; }; const 
formatDuration = (ms: number): string => { if (ms < 1000) return `${ms}ms`; return `${(ms / 1000).toFixed(ms < 10_000 ? 1 : 0)}s`; }; const summarizeText = (text: string, max = 140): string => { const first = text.split(/\r?\n/).find((line) => line.trim().length > 0) ?? ""; if (first.length <= max) return first; return `${first.slice(0, max - 1)}…`; }; const summarizeValue = (value: unknown): string => { if (value === undefined) return "undefined"; if (value === null) return "null"; if (typeof value === "string") return JSON.stringify(value.length > 80 ? `${value.slice(0, 77)}…` : value); if (typeof value === "number" || typeof value === "boolean") return String(value); if (Array.isArray(value)) return `[${value.length} items]`; if (typeof value === "object") return `{${Object.keys(value as Record).slice(0, 4).join(", ")}}`; return String(value); }; export class CodemodeTraceRecorder { private readonly trace: CodemodeTrace; private readonly steps = new Map(); private id = 0; constructor(input: { cwd: string; code: string }) { this.trace = { cwd: input.cwd, code: input.code, startedAt: Date.now(), status: "running", logs: [], steps: [], }; } log(line: string): void { this.trace.logs.push(line); } startStep(input: { label: string; toolPath: string; input: unknown }): string { const id = `step_${++this.id}`; const step: TraceStep = { id, label: input.label, toolPath: input.toolPath, input: clone(input.input), startedAt: Date.now(), status: "running", }; this.steps.set(id, step); this.trace.steps.push(step); return id; } finishStep(id: string, output?: ToolResultSnapshot, error?: string): void { const step = this.steps.get(id); if (!step) return; step.endedAt = Date.now(); if (error) { step.status = "error"; step.error = error; return; } if (output) { step.output = clone(output); step.status = output.isError ? 
"error" : "ok"; if (output.isError) step.error = output.text; return; } step.status = "ok"; } finish(input: { status: CodemodeTrace["status"]; value?: unknown; error?: string }): CodemodeTrace { this.trace.endedAt = Date.now(); this.trace.status = input.status; this.trace.value = input.value; this.trace.error = input.error; return clone(this.trace); } } export const summarizeTraceForContext = (trace: CodemodeTrace): string => { const duration = trace.endedAt ? formatDuration(trace.endedAt - trace.startedAt) : undefined; const errors = trace.steps.filter((s) => s.status === "error").length; return [ `codemode ${trace.status}`, duration, `${trace.steps.length} step${trace.steps.length === 1 ? "" : "s"}`, errors > 0 ? `${errors} err` : undefined, trace.logs.length > 0 ? `${trace.logs.length} log${trace.logs.length === 1 ? "" : "s"}` : undefined, trace.value !== undefined ? `value=${summarizeValue(trace.value)}` : undefined, trace.error ? `error=${summarizeText(trace.error, 120)}` : undefined, ] .filter((v): v is string => Boolean(v)) .join(" · "); }; export const formatTraceForAgent = (trace: CodemodeTrace, value: unknown, logs: string[]): { text: string; images: ImageContent[] } => { const durationMs = trace.endedAt ? trace.endedAt - trace.startedAt : 0; const duration = formatDuration(durationMs); const result: string[] = [`status:${trace.status} duration:${duration}`]; if (trace.steps.length > 0) { result.push("", "steps:"); for (const step of trace.steps) { const stepDuration = step.endedAt ? formatDuration(step.endedAt - step.startedAt) : ""; const prefix = step.status === "error" ? "[ERR]" : step.status === "ok" ? "[OK]" : "[...]"; result.push(`${prefix} ${step.label} ${stepDuration}`); if (step.error) { result.push(` error: ${step.error}`); } else if (step.output?.text) { const lines = step.output.text.split(/\r?\n/).filter((l) => l.trim().length > 0); const preview = lines.slice(0, 8).join("\n "); result.push(` output: ${preview}${lines.length > 8 ? `\n ... 
(${lines.length - 8} more lines)` : ""}`); } } } if (trace.error) { result.push("", `execution error: ${trace.error}`); } if (value !== undefined) { const raw = typeof value === "string" ? value : JSON.stringify(value); result.push("", `return value: ${raw}`); } if (logs.length > 0) { result.push("", "logs:"); for (const log of logs) { result.push(log.replace(/^\[\w+\] /, "")); } } const images = trace.steps.flatMap((s) => s.output?.images ?? []); return { text: result.join("\n"), images }; }; ================================================ FILE: agents/pi/extensions/pi-codemode/src/turndown.d.ts ================================================ declare module "turndown" { export default class TurndownService { turndown(html: string): string; } } ================================================ FILE: agents/pi/extensions/pi-codemode/src/types.ts ================================================ import type { ImageContent } from "@mariozechner/pi-ai"; export const builtinToolNames = ["read", "bash", "edit", "write", "grep", "find", "ls", "webfetch"] as const; export type BuiltinToolName = (typeof builtinToolNames)[number]; export const fffToolNames = [ "grep", "fileSearch", "multiGrep", "recentFiles", "searchThenGrep", ] as const; export type FffToolName = (typeof fffToolNames)[number]; export type ToolResultSnapshot = { value: unknown; text: string; images?: ImageContent[]; isError: boolean; }; export type TraceStep = { id: string; label: string; toolPath: string; input: unknown; startedAt: number; endedAt?: number; status: "running" | "ok" | "error"; output?: ToolResultSnapshot; error?: string; }; export type CodemodeTrace = { cwd: string; code: string; startedAt: number; endedAt?: number; status: "running" | "ok" | "error" | "aborted"; value?: unknown; logs: string[]; steps: TraceStep[]; error?: string; }; export type CodemodeResultDetails = { trace: CodemodeTrace; value?: unknown; logs: string[]; summary: string; }; 
================================================
FILE: agents/pi/extensions/pi-codemode/src/util.ts
================================================
// Unwrap a fully fenced code block (``` or ~~~, optional language tag);
// otherwise return the trimmed input unchanged.
export const stripCodeFences = (code: string): string => {
	const trimmed = code.trim();
	const match = trimmed.match(/^(```|~~~)(?:\w+)?\r?\n([\s\S]+)\r?\n\1$/);
	if (match) return match[2].trim();
	return trimmed;
};

// Prompt fragments injected into the codemode system prompt.
export const buildPromptGuidelines = () => [
	"Write plain JavaScript body only. No imports/exports. No markdown fences",
	"Use things from tools.* namespace to access tools",
	"Only do filesystem related operations using tools.pi.*",
	"Never rely on bash if it can be done in JavaScript",
	"Batch related work in one codemode call",
	"Prefer tools.pi.read over shell commands: read({ offset, limit }) gives line ranges",
	"Prefer tools.pi.grep over grep: it returns structured results with line numbers",
	"Output visibility: use `return value` to show output visibly, use `console.log(value)` for logs. Bare expressions like `result;` or string tricks like `'' + result` are silently swallowed.",
	"Bash is available for package managers (npm, pnpm, bun, yarn, npx, node), file operations (mkdir, cp, mv, rm, ln, chmod), and utilities (pwd, echo, which, uname, date). Use tools.pi.* for reading/writing/searching/grepping/editing files.",
];

// Best-effort conversion of an arbitrary thrown value to a message string.
export const formatError = (cause: unknown): string => {
	if (cause instanceof Error) return cause.message || cause.name;
	if (typeof cause === "string") return cause;
	try {
		return JSON.stringify(cause);
	} catch {
		return String(cause);
	}
};

================================================
FILE: agents/pi/extensions/pi-codemode/src/webfetch.ts
================================================
import TurndownService from "turndown";

export type WebfetchInput = {
	url: string;
	format?: "markdown" | "text" | "html";
	timeout?: number;
};

const MAX_RESPONSE_SIZE = 5 * 1024 * 1024;
const DEFAULT_TIMEOUT = 30 * 1000;
const MAX_TIMEOUT = 120 * 1000;

const turndownService = new TurndownService();

// Simple HTML to text conversion: strip script/style, convert common block
// closers to newlines, drop remaining tags, decode common entities.
// NOTE(review): the tag/entity patterns were mangled by extraction (literal
// "<tag" fragments and "&amp;"-style entities were eaten); reconstructed to
// the conventional sequence — confirm against upstream.
function htmlToText(html: string): string {
	return (
		html
			.replace(/<script[^>]*>[\s\S]*?<\/script>/gi, "")
			.replace(/<style[^>]*>[\s\S]*?<\/style>/gi, "")
			.replace(/<\/p>/gi, "\n\n")
			.replace(/<br\s*\/?>/gi, "\n")
			.replace(/<\/div>/gi, "\n")
			.replace(/<\/h[1-6]>/gi, "\n\n")
			.replace(/<\/li>/gi, "\n")
			.replace(/<[^>]+>/g, "")
			.replace(/&nbsp;/g, " ")
			.replace(/&amp;/g, "&")
			.replace(/&lt;/g, "<")
			.replace(/&gt;/g, ">")
			.replace(/&quot;/g, '"')
			.replace(/&#39;/g, "'")
			.replace(/\n{3,}/g, "\n\n")
			.trim()
	);
}

// Markdown conversion with a plain-text fallback if turndown throws.
function htmlToMarkdown(html: string): string {
	try {
		return turndownService.turndown(html);
	} catch {
		return htmlToText(html);
	}
}

// Fetch a URL and return its content as markdown/text/html. Errors are
// reported through `details.error` with empty content, never thrown.
export async function webfetch(
	_toolId: string,
	args: WebfetchInput,
	_signal: AbortSignal | undefined,
	_onUpdate: () => void,
): Promise<{ content: Array<{ type: "text"; text: string }>; details?: Record<string, unknown> }> {
	const format = args.format ??
"markdown"; let url = args.url; if (url.startsWith("http://")) { try { const parsed = new URL(url); const isLocalhost = parsed.hostname === "localhost" || parsed.hostname === "127.0.0.1" || parsed.hostname === "::1"; if (!isLocalhost) { url = url.replace("http://", "https://"); } } catch { return { content: [{ type: "text", text: "" }], details: { url: args.url, format, error: "Invalid URL" }, }; } } if (!url.startsWith("https://")) { return { content: [{ type: "text", text: "" }], details: { url: args.url, format, error: "Invalid URL" }, }; } const timeout = Math.min((args.timeout ?? DEFAULT_TIMEOUT / 1000) * 1000, MAX_TIMEOUT); const controller = new AbortController(); const timeoutId = setTimeout(() => controller.abort(), timeout); try { const response = await fetch(url, { signal: controller.signal, headers: { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36", Accept: "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "Accept-Language": "en-US,en;q=0.9", }, }); clearTimeout(timeoutId); if (!response.ok) { return { content: [{ type: "text", text: "" }], details: { url, format, error: `HTTP ${response.status}` }, }; } const contentLength = response.headers.get("content-length"); if (contentLength && parseInt(contentLength) > MAX_RESPONSE_SIZE) { return { content: [{ type: "text", text: "" }], details: { url, format, error: "Response too large" }, }; } const contentType = response.headers.get("content-type") || ""; const mime = contentType.split(";")[0]?.trim().toLowerCase() || ""; if ( mime.startsWith("image/") || mime.startsWith("video/") || mime.startsWith("audio/") || mime.startsWith("application/pdf") || mime.startsWith("application/zip") || mime.startsWith("application/octet-stream") ) { return { content: [{ type: "text", text: "" }], details: { url, format, contentType: mime, skipped: true }, }; } const arrayBuffer = await response.arrayBuffer(); if (arrayBuffer.byteLength > MAX_RESPONSE_SIZE) { return { content: [{ 
type: "text", text: "" }], details: { url, format, error: "Response too large" }, }; } let text = new TextDecoder().decode(arrayBuffer); let output = ""; switch (format) { case "markdown": output = mime.includes("text/html") ? htmlToMarkdown(text) : text; break; case "text": output = mime.includes("text/html") ? htmlToText(text) : text; break; case "html": output = text; break; } return { content: [{ type: "text", text: output.trim() }], details: { url, format, contentType: mime }, }; } catch (error) { clearTimeout(timeoutId); const errorMsg = error instanceof Error ? error.message : "Unknown error"; return { content: [{ type: "text", text: "" }], details: { url, format, error: errorMsg }, }; } } ================================================ FILE: agents/pi/extensions/pi-codemode/tsconfig.json ================================================ { "compilerOptions": { "target": "ES2022", "module": "NodeNext", "moduleResolution": "NodeNext", "strict": true, "esModuleInterop": true, "skipLibCheck": true, "forceConsistentCasingInFileNames": true, "resolveJsonModule": true, "declaration": true, "declarationMap": true, "noEmit": true, "paths": { "@executor/codemode-core": ["./node_modules/@executor/codemode-core/src/index.ts"] } }, "include": ["./src/**/*.ts", "./index.ts"], "exclude": ["node_modules", "examples"] } ================================================ FILE: agents/pi/extensions/prompt-timer.ts ================================================ import type { AssistantMessage } from "@mariozechner/pi-ai"; import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; function isAssistantMessage(message: unknown): message is AssistantMessage { if (!message || typeof message !== "object") return false; const role = (message as { role?: unknown }).role; return role === "assistant"; } function formatDuration(ms: number): string { const seconds = Math.floor(ms / 1000); const mins = Math.floor(seconds / 60); const secs = seconds % 60; if (mins > 0) { return 
`${mins}m ${secs}s`; } return `${secs}s`; } export default function (pi: ExtensionAPI) { let agentStartMs: number | null = null; pi.on("agent_start", () => { agentStartMs = Date.now(); }); pi.on("agent_end", (event, ctx) => { if (!ctx.hasUI) return; if (agentStartMs === null) return; const elapsedMs = Date.now() - agentStartMs; agentStartMs = null; if (elapsedMs <= 0) return; let input = 0; let output = 0; let cacheRead = 0; let cacheWrite = 0; let totalTokens = 0; for (const message of event.messages) { if (!isAssistantMessage(message)) continue; input += message.usage.input || 0; output += message.usage.output || 0; cacheRead += message.usage.cacheRead || 0; cacheWrite += message.usage.cacheWrite || 0; totalTokens += message.usage.totalTokens || 0; } if (output <= 0) return; const elapsedSeconds = elapsedMs / 1000; const tokensPerSecond = output / elapsedSeconds; const message = `TPS: ${tokensPerSecond.toFixed(1)} tok/s · took ${formatDuration(elapsedMs)}`; ctx.ui.notify(message, "info"); }); } ================================================ FILE: agents/pi/extensions/session-breakdown.ts ================================================ /** * /session-breakdown * * Interactive TUI that analyzes ~/.pi/agent/sessions (recursively, *.jsonl) and shows * last 7/30/90 days of: * - sessions/day * - messages/day * - tokens/day (if available) * - cost/day (if available) * - model breakdown (sessions/messages/tokens + cost) * * Graph: * - GitHub-contributions-style calendar (weeks x weekdays) * - Hue: weighted mix of popular model colors (weighted by the selected metric) * - Brightness: selected metric per day (log-scaled) */ import type { ExtensionAPI, ExtensionContext } from "@mariozechner/pi-coding-agent"; import { BorderedLoader } from "@mariozechner/pi-coding-agent"; import { Key, matchesKey, sliceByColumn, type Component, type TUI, truncateToWidth, visibleWidth, } from "@mariozechner/pi-tui"; import os from "node:os"; import path from "node:path"; import fs from 
"node:fs/promises"; import { createReadStream, type Dirent } from "node:fs"; import readline from "node:readline"; type ModelKey = string; // `${provider}/${model}` interface ParsedSession { filePath: string; startedAt: Date; dayKeyLocal: string; // YYYY-MM-DD (local) modelsUsed: Set; messages: number; tokens: number; totalCost: number; costByModel: Map; messagesByModel: Map; tokensByModel: Map; } interface DayAgg { date: Date; // local midnight dayKeyLocal: string; sessions: number; messages: number; tokens: number; totalCost: number; costByModel: Map; sessionsByModel: Map; messagesByModel: Map; tokensByModel: Map; } interface RangeAgg { days: DayAgg[]; dayByKey: Map; sessions: number; totalMessages: number; totalTokens: number; totalCost: number; modelCost: Map; modelSessions: Map; // number of sessions where model was used modelMessages: Map; modelTokens: Map; } interface RGB { r: number; g: number; b: number; } interface BreakdownData { generatedAt: Date; ranges: Map; palette: { modelColors: Map; otherColor: RGB; orderedModels: ModelKey[]; }; } const SESSION_ROOT = path.join(os.homedir(), ".pi", "agent", "sessions"); const RANGE_DAYS = [7, 30, 90] as const; type MeasurementMode = "sessions" | "messages" | "tokens"; type BreakdownProgressPhase = "scan" | "parse" | "finalize"; interface BreakdownProgressState { phase: BreakdownProgressPhase; foundFiles: number; parsedFiles: number; totalFiles: number; currentFile?: string; } function setBorderedLoaderMessage(loader: BorderedLoader, message: string) { // BorderedLoader wraps a (Cancellable)Loader which supports setMessage(), // but it doesn't expose it publicly. Access the inner loader for progress updates. 
const inner = (loader as any)["loader"]; // eslint-disable-line @typescript-eslint/no-explicit-any
	if (inner && typeof inner.setMessage === "function") {
		inner.setMessage(message);
	}
}

// Dark-ish background and empty cell color (close to GitHub dark)
const DEFAULT_BG: RGB = { r: 13, g: 17, b: 23 };
const EMPTY_CELL_BG: RGB = { r: 22, g: 27, b: 34 };

// Default palette (assigned to top models)
const PALETTE: RGB[] = [
	{ r: 64, g: 196, b: 99 }, // green
	{ r: 47, g: 129, b: 247 }, // blue
	{ r: 163, g: 113, b: 247 }, // purple
	{ r: 255, g: 159, b: 10 }, // orange
	{ r: 244, g: 67, b: 54 }, // red
];

// Clamp a number into [0, 1].
function clamp01(x: number): number {
	return Math.max(0, Math.min(1, x));
}

// Linear interpolation from a to b by t.
function lerp(a: number, b: number, t: number): number {
	return a + (b - a) * t;
}

// Channel-wise linear blend of two colors (t=0 → a, t=1 → b).
function mixRgb(a: RGB, b: RGB, t: number): RGB {
	return {
		r: Math.round(lerp(a.r, b.r, t)),
		g: Math.round(lerp(a.g, b.g, t)),
		b: Math.round(lerp(a.b, b.b, t)),
	};
}

// Weighted average of colors; non-finite/non-positive weights are skipped.
// Falls back to the empty-cell color when no usable weight remains.
function weightedMix(colors: Array<{ color: RGB; weight: number }>): RGB {
	let total = 0;
	let r = 0;
	let g = 0;
	let b = 0;
	for (const c of colors) {
		if (!Number.isFinite(c.weight) || c.weight <= 0) continue;
		total += c.weight;
		r += c.color.r * c.weight;
		g += c.color.g * c.weight;
		b += c.color.b * c.weight;
	}
	if (total <= 0) return EMPTY_CELL_BG;
	return { r: Math.round(r / total), g: Math.round(g / total), b: Math.round(b / total) };
}

// 24-bit ANSI background-color wrapper.
function ansiBg(rgb: RGB, text: string): string {
	return `\x1b[48;2;${rgb.r};${rgb.g};${rgb.b}m${text}\x1b[0m`;
}

// 24-bit ANSI foreground-color wrapper.
function ansiFg(rgb: RGB, text: string): string {
	return `\x1b[38;2;${rgb.r};${rgb.g};${rgb.b}m${text}\x1b[0m`;
}

// ANSI dim / bold wrappers.
function dim(text: string): string {
	return `\x1b[2m${text}\x1b[0m`;
}
function bold(text: string): string {
	return `\x1b[1m${text}\x1b[0m`;
}

// Human-readable count: 1.2B / 3.4M / 12.3K; below 10K uses locale grouping.
function formatCount(n: number): string {
	if (!Number.isFinite(n) || n === 0) return "0";
	if (n >= 1_000_000_000) return `${(n / 1_000_000_000).toFixed(1)}B`;
	if (n >= 1_000_000) return `${(n / 1_000_000).toFixed(1)}M`;
	if (n >= 10_000) return `${(n / 1_000).toFixed(1)}K`;
	return n.toLocaleString("en-US");
}

// USD formatting with extra precision for sub-dollar amounts.
function formatUsd(cost: number): string {
	if (!Number.isFinite(cost)) return "$0.00";
	if (cost >= 1) return `$${cost.toFixed(2)}`;
	if (cost >= 0.1) return `$${cost.toFixed(3)}`;
	return `$${cost.toFixed(4)}`;
}

// Space-pad on the right/left. Plain string length — not ANSI-aware.
function padRight(s: string, n: number): string {
	const delta = n - s.length;
	return delta > 0 ? s + " ".repeat(delta) : s;
}
function padLeft(s: string, n: number): string {
	const delta = n - s.length;
	return delta > 0 ? " ".repeat(delta) + s : s;
}

// YYYY-MM-DD in local time.
function toLocalDayKey(d: Date): string {
	const yyyy = d.getFullYear();
	const mm = String(d.getMonth() + 1).padStart(2, "0");
	const dd = String(d.getDate()).padStart(2, "0");
	return `${yyyy}-${mm}-${dd}`;
}

// Local midnight of the given date.
function localMidnight(d: Date): Date {
	return new Date(d.getFullYear(), d.getMonth(), d.getDate(), 0, 0, 0, 0);
}

// Add N calendar days in local time (setDate handles month/DST rollover).
function addDaysLocal(d: Date, days: number): Date {
	const x = new Date(d);
	x.setDate(x.getDate() + days);
	return x;
}

// Inclusive day count between two local dates.
function countDaysInclusiveLocal(start: Date, end: Date): number {
	// Avoid ms-based day math because DST transitions can make a “day” 23/25h in local time.
	let n = 0;
	for (let d = new Date(start); d <= end; d = addDaysLocal(d, 1)) n++;
	return n;
}

// Weekday index with Monday first.
function mondayIndex(date: Date): number {
	// Mon=0 .. Sun=6
	return (date.getDay() + 6) % 7;
}

// Build a `${provider}/${model}` key; returns whichever part exists, or null if neither.
function modelKeyFromParts(provider?: unknown, model?: unknown): ModelKey | null {
	const p = typeof provider === "string" ? provider.trim() : "";
	const m = typeof model === "string" ? model.trim() : "";
	if (!p && !m) return null;
	if (!p) return m;
	if (!m) return p;
	return `${p}/${m}`;
}

// Parse the session start time encoded in the file name, if present.
function parseSessionStartFromFilename(name: string): Date | null {
	// Example: 2026-02-02T21-52-28-774Z_.jsonl
	const m = name.match(/^(\d{4}-\d{2}-\d{2})T(\d{2})-(\d{2})-(\d{2})-(\d{3})Z_/);
	if (!m) return null;
	const iso = `${m[1]}T${m[2]}:${m[3]}:${m[4]}.${m[5]}Z`;
	const d = new Date(iso);
	return Number.isFinite(d.getTime()) ?
d : null; } function extractProviderModelAndUsage(obj: any): { provider?: any; model?: any; modelId?: any; usage?: any } { // Session format varies across versions. // - Newer: { provider, model, usage } on the message wrapper // - Older: { message: { provider, model, usage } } const msg = obj?.message; return { provider: obj?.provider ?? msg?.provider, model: obj?.model ?? msg?.model, modelId: obj?.modelId ?? msg?.modelId, usage: obj?.usage ?? msg?.usage, }; } function extractCostTotal(usage: any): number { if (!usage) return 0; const c = usage?.cost; if (typeof c === "number") return Number.isFinite(c) ? c : 0; if (typeof c === "string") { const n = Number(c); return Number.isFinite(n) ? n : 0; } const t = c?.total; if (typeof t === "number") return Number.isFinite(t) ? t : 0; if (typeof t === "string") { const n = Number(t); return Number.isFinite(n) ? n : 0; } return 0; } function extractTokensTotal(usage: any): number { // Usage format varies across providers and pi versions. // We try a few common shapes: // - { totalTokens } // - { total_tokens } // - { promptTokens, completionTokens } // - { prompt_tokens, completion_tokens } // - { input_tokens, output_tokens } // - { inputTokens, outputTokens } // - { tokens: number | { total } } if (!usage) return 0; const readNum = (v: any): number => { if (typeof v === "number") return Number.isFinite(v) ? v : 0; if (typeof v === "string") { const n = Number(v); return Number.isFinite(n) ? 
n : 0; } return 0; }; let total = 0; // direct totals total = readNum(usage?.totalTokens) || readNum(usage?.total_tokens) || readNum(usage?.tokens) || readNum(usage?.tokenCount) || readNum(usage?.token_count); if (total > 0) return total; // nested tokens object total = readNum(usage?.tokens?.total) || readNum(usage?.tokens?.totalTokens) || readNum(usage?.tokens?.total_tokens); if (total > 0) return total; // sum of parts const a = readNum(usage?.promptTokens) || readNum(usage?.prompt_tokens) || readNum(usage?.inputTokens) || readNum(usage?.input_tokens); const b = readNum(usage?.completionTokens) || readNum(usage?.completion_tokens) || readNum(usage?.outputTokens) || readNum(usage?.output_tokens); const sum = a + b; return sum > 0 ? sum : 0; } async function walkSessionFiles( root: string, startCutoffLocal: Date, signal?: AbortSignal, onFound?: (found: number) => void, ): Promise { const out: string[] = []; const stack: string[] = [root]; while (stack.length) { if (signal?.aborted) break; const dir = stack.pop()!; let entries: Dirent[] = []; try { entries = await fs.readdir(dir, { withFileTypes: true }); } catch { continue; } for (const ent of entries) { if (signal?.aborted) break; const p = path.join(dir, ent.name); if (ent.isDirectory()) { stack.push(p); continue; } if (!ent.isFile() || !ent.name.endsWith(".jsonl")) continue; // Prefer filename timestamp, else fall back to mtime. 
const startedAt = parseSessionStartFromFilename(ent.name); if (startedAt) { if (localMidnight(startedAt) >= startCutoffLocal) { out.push(p); if (onFound && out.length % 10 === 0) onFound(out.length); } continue; } try { const st = await fs.stat(p); const approx = new Date(st.mtimeMs); if (localMidnight(approx) >= startCutoffLocal) { out.push(p); if (onFound && out.length % 10 === 0) onFound(out.length); } } catch { // ignore } } } onFound?.(out.length); return out; } async function parseSessionFile(filePath: string, signal?: AbortSignal): Promise { const fileName = path.basename(filePath); let startedAt = parseSessionStartFromFilename(fileName); let currentModel: ModelKey | null = null; const modelsUsed = new Set(); let messages = 0; let tokens = 0; let totalCost = 0; const costByModel = new Map(); const messagesByModel = new Map(); const tokensByModel = new Map(); const stream = createReadStream(filePath, { encoding: "utf8" }); const rl = readline.createInterface({ input: stream, crlfDelay: Infinity }); try { for await (const line of rl) { if (signal?.aborted) { rl.close(); stream.destroy(); return null; } if (!line) continue; let obj: any; try { obj = JSON.parse(line); } catch { continue; } if (!startedAt && obj?.type === "session" && typeof obj?.timestamp === "string") { const d = new Date(obj.timestamp); if (Number.isFinite(d.getTime())) startedAt = d; continue; } if (obj?.type === "model_change") { const mk = modelKeyFromParts(obj.provider, obj.modelId); if (mk) { currentModel = mk; modelsUsed.add(mk); } continue; } if (obj?.type !== "message") continue; const { provider, model, modelId, usage } = extractProviderModelAndUsage(obj); const mk = modelKeyFromParts(provider, model) ?? modelKeyFromParts(provider, modelId) ?? currentModel ?? "unknown"; modelsUsed.add(mk); messages += 1; messagesByModel.set(mk, (messagesByModel.get(mk) ?? 0) + 1); const tok = extractTokensTotal(usage); if (tok > 0) { tokens += tok; tokensByModel.set(mk, (tokensByModel.get(mk) ?? 
0) + tok); } const cost = extractCostTotal(usage); if (cost > 0) { totalCost += cost; costByModel.set(mk, (costByModel.get(mk) ?? 0) + cost); } } } finally { rl.close(); stream.destroy(); } if (!startedAt) return null; const dayKeyLocal = toLocalDayKey(startedAt); return { filePath, startedAt, dayKeyLocal, modelsUsed, messages, tokens, totalCost, costByModel, messagesByModel, tokensByModel, }; } function buildRangeAgg(days: number, now: Date): RangeAgg { const end = localMidnight(now); const start = addDaysLocal(end, -(days - 1)); const outDays: DayAgg[] = []; const dayByKey = new Map(); for (let i = 0; i < days; i++) { const d = addDaysLocal(start, i); const dayKeyLocal = toLocalDayKey(d); const day: DayAgg = { date: d, dayKeyLocal, sessions: 0, messages: 0, tokens: 0, totalCost: 0, costByModel: new Map(), sessionsByModel: new Map(), messagesByModel: new Map(), tokensByModel: new Map(), }; outDays.push(day); dayByKey.set(dayKeyLocal, day); } return { days: outDays, dayByKey, sessions: 0, totalMessages: 0, totalTokens: 0, totalCost: 0, modelCost: new Map(), modelSessions: new Map(), modelMessages: new Map(), modelTokens: new Map(), }; } function addSessionToRange(range: RangeAgg, session: ParsedSession): void { const day = range.dayByKey.get(session.dayKeyLocal); if (!day) return; range.sessions += 1; range.totalMessages += session.messages; range.totalTokens += session.tokens; range.totalCost += session.totalCost; day.sessions += 1; day.messages += session.messages; day.tokens += session.tokens; day.totalCost += session.totalCost; // Sessions-per-model (presence) for (const mk of session.modelsUsed) { day.sessionsByModel.set(mk, (day.sessionsByModel.get(mk) ?? 0) + 1); range.modelSessions.set(mk, (range.modelSessions.get(mk) ?? 0) + 1); } // Messages-per-model for (const [mk, n] of session.messagesByModel.entries()) { day.messagesByModel.set(mk, (day.messagesByModel.get(mk) ?? 0) + n); range.modelMessages.set(mk, (range.modelMessages.get(mk) ?? 
0) + n); } // Tokens-per-model for (const [mk, n] of session.tokensByModel.entries()) { day.tokensByModel.set(mk, (day.tokensByModel.get(mk) ?? 0) + n); range.modelTokens.set(mk, (range.modelTokens.get(mk) ?? 0) + n); } // Cost-per-model for (const [mk, cost] of session.costByModel.entries()) { day.costByModel.set(mk, (day.costByModel.get(mk) ?? 0) + cost); range.modelCost.set(mk, (range.modelCost.get(mk) ?? 0) + cost); } } function sortMapByValueDesc(m: Map): Array<{ key: K; value: number }> { return [...m.entries()] .map(([key, value]) => ({ key, value })) .sort((a, b) => b.value - a.value); } function choosePaletteFromLast30Days(range30: RangeAgg, topN = 4): { modelColors: Map; otherColor: RGB; orderedModels: ModelKey[]; } { // Prefer cost if any cost exists, else tokens, else messages, else sessions. const costSum = [...range30.modelCost.values()].reduce((a, b) => a + b, 0); const popularity = costSum > 0 ? range30.modelCost : range30.totalTokens > 0 ? range30.modelTokens : range30.totalMessages > 0 ? range30.modelMessages : range30.modelSessions; const sorted = sortMapByValueDesc(popularity); const orderedModels = sorted.slice(0, topN).map((x) => x.key); const modelColors = new Map(); for (let i = 0; i < orderedModels.length; i++) { modelColors.set(orderedModels[i], PALETTE[i % PALETTE.length]); } return { modelColors, otherColor: { r: 160, g: 160, b: 160 }, orderedModels, }; } function dayMixedColor( day: DayAgg, modelColors: Map, otherColor: RGB, mode: MeasurementMode, ): RGB { const parts: Array<{ color: RGB; weight: number }> = []; let otherWeight = 0; let map: Map; if (mode === "tokens") { map = day.tokens > 0 ? day.tokensByModel : day.messages > 0 ? day.messagesByModel : day.sessionsByModel; } else if (mode === "messages") { map = day.messages > 0 ? 
day.messagesByModel : day.sessionsByModel;
	} else {
		map = day.sessionsByModel;
	}
	for (const [mk, w] of map.entries()) {
		const c = modelColors.get(mk);
		if (c) parts.push({ color: c, weight: w });
		else otherWeight += w;
	}
	if (otherWeight > 0) parts.push({ color: otherColor, weight: otherWeight });
	return weightedMix(parts);
}

/**
 * Pick the metric to plot for a range, falling back tokens → messages → sessions
 * when the preferred metric has no data. `denom` is log1p(max) for log scaling.
 */
function graphMetricForRange(
	range: RangeAgg,
	mode: MeasurementMode,
): { kind: "sessions" | "messages" | "tokens"; max: number; denom: number } {
	if (mode === "tokens") {
		const maxTokens = Math.max(0, ...range.days.map((d) => d.tokens));
		if (maxTokens > 0) return { kind: "tokens", max: maxTokens, denom: Math.log1p(maxTokens) };
		// fall back if tokens aren't available
		mode = "messages";
	}
	if (mode === "messages") {
		const maxMessages = Math.max(0, ...range.days.map((d) => d.messages));
		if (maxMessages > 0) return { kind: "messages", max: maxMessages, denom: Math.log1p(maxMessages) };
		// fall back if messages aren't available
		mode = "sessions";
	}
	const maxSessions = Math.max(0, ...range.days.map((d) => d.sessions));
	return { kind: "sessions", max: maxSessions, denom: Math.log1p(maxSessions) };
}

/** Number of week columns needed to cover the range on a Monday-first grid. */
function weeksForRange(range: RangeAgg): number {
	const days = range.days;
	const start = days[0].date;
	const end = days[days.length - 1].date;
	const gridStart = addDaysLocal(start, -mondayIndex(start));
	const gridEnd = addDaysLocal(end, 6 - mondayIndex(end));
	const totalGridDays = countDaysInclusiveLocal(gridStart, gridEnd);
	return Math.ceil(totalGridDays / 7);
}

/**
 * Render the contributions-style calendar as 7 rows (Mon..Sun) of colored cells.
 * Hue comes from the per-day model mix; brightness from the log-scaled metric.
 */
function renderGraphLines(
	range: RangeAgg,
	modelColors: Map<ModelKey, RGB>,
	otherColor: RGB,
	mode: MeasurementMode,
	options?: { cellWidth?: number; gap?: number },
): string[] {
	const days = range.days;
	const start = days[0].date;
	const end = days[days.length - 1].date;
	const gridStart = addDaysLocal(start, -mondayIndex(start));
	const gridEnd = addDaysLocal(end, 6 - mondayIndex(end));
	const totalGridDays = countDaysInclusiveLocal(gridStart, gridEnd);
	const weeks = Math.ceil(totalGridDays / 7);
	const cellWidth = Math.max(1, Math.floor(options?.cellWidth ?? 1));
	const gap = Math.max(0, Math.floor(options?.gap ?? 1));
	const block = "█".repeat(cellWidth);
	const gapStr = " ".repeat(gap);
	const metric = graphMetricForRange(range, mode);
	const denom = metric.denom;
	// Label only Mon/Wed/Fri like GitHub (saves space)
	const labelByRow = new Map([
		[0, "Mon"],
		[2, "Wed"],
		[4, "Fri"],
	]);
	const lines: string[] = [];
	for (let row = 0; row < 7; row++) {
		const label = labelByRow.get(row);
		// Left gutter is 4 columns either way ("Mon " or 4 blanks) so rows align.
		let line = label ? padRight(label, 3) + " " : "    ";
		for (let w = 0; w < weeks; w++) {
			const cellDate = addDaysLocal(gridStart, w * 7 + row);
			const inRange = cellDate >= start && cellDate <= end;
			const colGap = w < weeks - 1 ? gapStr : "";
			if (!inRange) {
				line += " ".repeat(cellWidth) + colGap;
				continue;
			}
			const key = toLocalDayKey(cellDate);
			const day = range.dayByKey.get(key);
			const value =
				metric.kind === "tokens"
					? (day?.tokens ?? 0)
					: metric.kind === "messages"
						? (day?.messages ?? 0)
						: (day?.sessions ?? 0);
			if (!day || value <= 0) {
				line += ansiFg(EMPTY_CELL_BG, block) + colGap;
				continue;
			}
			const hue = dayMixedColor(day, modelColors, otherColor, mode);
			let t = denom > 0 ? Math.log1p(value) / denom : 0;
			t = clamp01(t);
			// Floor the intensity so any non-zero day stays visible.
			const minVisible = 0.2;
			const intensity = minVisible + (1 - minVisible) * t;
			const rgb = mixRgb(DEFAULT_BG, hue, intensity);
			line += ansiFg(rgb, block) + colGap;
		}
		lines.push(line);
	}
	return lines;
}

/** Strip the provider prefix from `provider/model`. */
function displayModelName(modelKey: string): string {
	const idx = modelKey.indexOf("/");
	return idx === -1 ? modelKey : modelKey.slice(idx + 1);
}

/** Legend entries: a colored swatch per top model, plus a catch-all "other". */
function renderLegendItems(modelColors: Map<ModelKey, RGB>, orderedModels: ModelKey[], otherColor: RGB): string[] {
	const items: string[] = [];
	for (const mk of orderedModels) {
		const c = modelColors.get(mk);
		if (!c) continue;
		items.push(`${ansiFg(c, "█")} ${displayModelName(mk)}`);
	}
	items.push(`${ansiFg(otherColor, "█")} other`);
	return items;
}

/** Right-align `text` into `width` columns, keeping the rightmost part visible. */
function fitRight(text: string, width: number): string {
	if (width <= 0) return "";
	let w = visibleWidth(text);
	let t = text;
	if (w > width) {
		t = sliceByColumn(t, w - width, width, true);
		w = visibleWidth(t);
	}
	return " ".repeat(Math.max(0, width - w)) + t;
}

/** Label on the left, legend items right-aligned; overflows onto extra lines. */
function renderLegendBlock(leftLabel: string, items: string[], width: number): string[] {
	if (width <= 0) return [];
	if (items.length === 0) return [truncateToWidth(leftLabel, width)];
	const lines: string[] = [];
	// First line: label on left, first item right-aligned into remaining space.
	const leftW = visibleWidth(leftLabel);
	if (leftW >= width) {
		lines.push(truncateToWidth(leftLabel, width));
		// Put all items on their own lines right-aligned.
		for (const it of items) lines.push(fitRight(it, width));
		return lines;
	}
	const remaining = Math.max(0, width - leftW);
	lines.push(leftLabel + fitRight(items[0], remaining));
	for (let i = 1; i < items.length; i++) {
		lines.push(fitRight(items[i], width));
	}
	return lines;
}

/** Per-model table for the selected metric, sorted descending, capped at `maxRows`. */
function renderModelTable(range: RangeAgg, mode: MeasurementMode, maxRows = 8): string[] {
	// Keep this relatively narrow: model + selected metric + cost + share.
const metric = graphMetricForRange(range, mode); const kind = metric.kind; let perModel: Map; let total = 0; let label = kind; if (kind === "tokens") { perModel = range.modelTokens; total = range.totalTokens; } else if (kind === "messages") { perModel = range.modelMessages; total = range.totalMessages; } else { perModel = range.modelSessions; total = range.sessions; } const sorted = sortMapByValueDesc(perModel); const rows = sorted.slice(0, maxRows); const valueWidth = kind === "tokens" ? 10 : 8; const modelWidth = Math.min(52, Math.max("model".length, ...rows.map((r) => r.key.length))); const lines: string[] = []; lines.push(`${padRight("model", modelWidth)} ${padLeft(label, valueWidth)} ${padLeft("cost", 10)} ${padLeft("share", 6)}`); lines.push(`${"-".repeat(modelWidth)} ${"-".repeat(valueWidth)} ${"-".repeat(10)} ${"-".repeat(6)}`); for (const r of rows) { const value = perModel.get(r.key) ?? 0; const cost = range.modelCost.get(r.key) ?? 0; const share = total > 0 ? `${Math.round((value / total) * 100)}%` : "0%"; lines.push( `${padRight(r.key.slice(0, modelWidth), modelWidth)} ${padLeft(formatCount(value), valueWidth)} ${padLeft(formatUsd(cost), 10)} ${padLeft(share, 6)}`, ); } if (sorted.length === 0) { lines.push(dim("(no model data found)")); } return lines; } function renderLeftRight(left: string, right: string, width: number): string { const leftW = visibleWidth(left); if (width <= 0) return ""; if (leftW >= width) return truncateToWidth(left, width); const remaining = width - leftW; let rightText = right; const rightW = visibleWidth(rightText); if (rightW > remaining) { // Keep the *rightmost* part visible. rightText = sliceByColumn(rightText, rightW - remaining, remaining, true); } const pad = Math.max(0, remaining - visibleWidth(rightText)); return left + " ".repeat(pad) + rightText; } function rangeSummary(range: RangeAgg, days: number, mode: MeasurementMode): string { const avg = range.sessions > 0 ? 
range.totalCost / range.sessions : 0; const costPart = range.totalCost > 0 ? `${formatUsd(range.totalCost)} · avg ${formatUsd(avg)}/session` : `$0.0000`; if (mode === "tokens") { return `Last ${days} days: ${formatCount(range.sessions)} sessions · ${formatCount(range.totalTokens)} tokens · ${costPart}`; } if (mode === "messages") { return `Last ${days} days: ${formatCount(range.sessions)} sessions · ${formatCount(range.totalMessages)} messages · ${costPart}`; } return `Last ${days} days: ${formatCount(range.sessions)} sessions · ${costPart}`; } async function computeBreakdown( signal?: AbortSignal, onProgress?: (update: Partial) => void, ): Promise { const now = new Date(); const ranges = new Map(); for (const d of RANGE_DAYS) ranges.set(d, buildRangeAgg(d, now)); const range90 = ranges.get(90)!; const start90 = range90.days[0].date; onProgress?.({ phase: "scan", foundFiles: 0, parsedFiles: 0, totalFiles: 0, currentFile: undefined }); const candidates = await walkSessionFiles(SESSION_ROOT, start90, signal, (found) => { onProgress?.({ phase: "scan", foundFiles: found }); }); const totalFiles = candidates.length; onProgress?.({ phase: "parse", foundFiles: totalFiles, totalFiles, parsedFiles: 0, currentFile: totalFiles > 0 ? path.basename(candidates[0]!) 
: undefined, }); let parsedFiles = 0; for (const filePath of candidates) { if (signal?.aborted) break; parsedFiles += 1; onProgress?.({ phase: "parse", parsedFiles, totalFiles, currentFile: path.basename(filePath) }); const session = await parseSessionFile(filePath, signal); if (!session) continue; const sessionDay = localMidnight(session.startedAt); for (const d of RANGE_DAYS) { const range = ranges.get(d)!; const start = range.days[0].date; const end = range.days[range.days.length - 1].date; if (sessionDay < start || sessionDay > end) continue; addSessionToRange(range, session); } } onProgress?.({ phase: "finalize", currentFile: undefined }); const palette = choosePaletteFromLast30Days(ranges.get(30)!, 4); return { generatedAt: now, ranges, palette }; } class BreakdownComponent implements Component { private data: BreakdownData; private tui: TUI; private onDone: () => void; private rangeIndex = 1; // default 30d private measurement: MeasurementMode = "sessions"; private cachedWidth?: number; private cachedLines?: string[]; constructor(data: BreakdownData, tui: TUI, onDone: () => void) { this.data = data; this.tui = tui; this.onDone = onDone; } invalidate(): void { this.cachedWidth = undefined; this.cachedLines = undefined; } handleInput(data: string): void { if (matchesKey(data, Key.escape) || matchesKey(data, Key.ctrl("c")) || data.toLowerCase() === "q") { this.onDone(); return; } if (matchesKey(data, Key.tab) || matchesKey(data, Key.shift("tab")) || data.toLowerCase() === "t") { const order: MeasurementMode[] = ["sessions", "messages", "tokens"]; const idx = Math.max(0, order.indexOf(this.measurement)); const dir = matchesKey(data, Key.shift("tab")) ? -1 : 1; this.measurement = order[(idx + order.length + dir) % order.length] ?? 
"sessions"; this.invalidate(); this.tui.requestRender(); return; } const prev = () => { this.rangeIndex = (this.rangeIndex + RANGE_DAYS.length - 1) % RANGE_DAYS.length; this.invalidate(); this.tui.requestRender(); }; const next = () => { this.rangeIndex = (this.rangeIndex + 1) % RANGE_DAYS.length; this.invalidate(); this.tui.requestRender(); }; if (matchesKey(data, Key.left) || data.toLowerCase() === "h") prev(); if (matchesKey(data, Key.right) || data.toLowerCase() === "l") next(); if (data === "1") { this.rangeIndex = 0; this.invalidate(); this.tui.requestRender(); } if (data === "2") { this.rangeIndex = 1; this.invalidate(); this.tui.requestRender(); } if (data === "3") { this.rangeIndex = 2; this.invalidate(); this.tui.requestRender(); } } render(width: number): string[] { if (this.cachedWidth === width && this.cachedLines) return this.cachedLines; const selectedDays = RANGE_DAYS[this.rangeIndex]; const range = this.data.ranges.get(selectedDays)!; const metric = graphMetricForRange(range, this.measurement); const tab = (days: number, idx: number): string => { const selected = idx === this.rangeIndex; const label = `${days}d`; return selected ? bold(`[${label}]`) : dim(` ${label} `); }; const metricTab = (mode: MeasurementMode, label: string): string => { const selected = mode === this.measurement; return selected ? bold(`[${label}]`) : dim(` ${label} `); }; const header = `${bold("Session breakdown")} ${tab(7, 0)} ${tab(30, 1)} ${tab(90, 2)} ` + `${metricTab("sessions", "sess")} ${metricTab("messages", "msg")} ${metricTab("tokens", "tok")}`; const legendItems = renderLegendItems( this.data.palette.modelColors, this.data.palette.orderedModels, this.data.palette.otherColor, ); const summary = rangeSummary(range, selectedDays, metric.kind) + dim(` (graph: ${metric.kind}/day)`); const maxScale = selectedDays === 7 ? 4 : selectedDays === 30 ? 
3 : 2; const weeks = weeksForRange(range); const leftMargin = 4; // "Mon " (or 4 spaces) const gap = 1; const graphArea = Math.max(1, width - leftMargin); // Each week column uses: cellWidth + gap. Last column also gets gap (fine; we truncate anyway). const idealCellWidth = Math.floor((graphArea + gap) / Math.max(1, weeks)) - gap; const cellWidth = Math.min(maxScale, Math.max(1, idealCellWidth)); const graphLines = renderGraphLines( range, this.data.palette.modelColors, this.data.palette.otherColor, this.measurement, { cellWidth, gap }, ); const tableLines = renderModelTable(range, metric.kind, 8); const lines: string[] = []; lines.push(truncateToWidth(header, width)); lines.push(truncateToWidth(dim("←/→ range · tab metric · q to close"), width)); lines.push(""); lines.push(truncateToWidth(summary, width)); lines.push(""); // Render legend on the RIGHT of the graph if there is space. const graphWidth = Math.max(0, ...graphLines.map((l) => visibleWidth(l))); const sep = 2; const legendWidth = width - graphWidth - sep; const showSideLegend = legendWidth >= 22; if (showSideLegend) { const legendBlock: string[] = []; legendBlock.push(dim("Top models (30d palette):")); legendBlock.push(...legendItems); // Fit into 7 rows (same as graph). If too many, show a final "+N more" line. const maxLegendRows = graphLines.length; let legendLines = legendBlock.slice(0, maxLegendRows); if (legendBlock.length > maxLegendRows) { const remaining = legendBlock.length - (maxLegendRows - 1); legendLines = [...legendBlock.slice(0, maxLegendRows - 1), dim(`+${remaining} more`)]; } while (legendLines.length < graphLines.length) legendLines.push(""); const padRightAnsi = (s: string, target: number): string => { const w = visibleWidth(s); return w >= target ? s : s + " ".repeat(target - w); }; for (let i = 0; i < graphLines.length; i++) { const left = padRightAnsi(graphLines[i] ?? "", graphWidth); const right = truncateToWidth(legendLines[i] ?? 
"", Math.max(0, legendWidth)); lines.push(truncateToWidth(left + " ".repeat(sep) + right, width)); } } else { // Fallback: graph only (legend will be shown below). for (const gl of graphLines) lines.push(truncateToWidth(gl, width)); lines.push(""); // Compact legend below, left-aligned. lines.push(truncateToWidth(dim("Top models (30d palette):"), width)); for (const it of legendItems) lines.push(truncateToWidth(it, width)); } lines.push(""); for (const tl of tableLines) lines.push(truncateToWidth(tl, width)); // Ensure no overly long lines (truncateToWidth already), but keep at least 1 line. this.cachedWidth = width; this.cachedLines = lines.map((l) => (visibleWidth(l) > width ? truncateToWidth(l, width) : l)); return this.cachedLines; } } export default function sessionBreakdownExtension(pi: ExtensionAPI) { pi.registerCommand("session-breakdown", { description: "Interactive breakdown of last 7/30/90 days of ~/.pi session usage (sessions/messages/tokens + cost by model)", handler: async (_args, ctx: ExtensionContext) => { if (!ctx.hasUI) { // Non-interactive fallback: just notify. 
const data = await computeBreakdown(undefined); const range = data.ranges.get(30)!; pi.sendMessage( { customType: "session-breakdown", content: `Session breakdown (non-interactive)\n${rangeSummary(range, 30, "sessions")}`, display: true, }, { triggerTurn: false }, ); return; } let aborted = false; const data = await ctx.ui.custom((tui, theme, _kb, done) => { const baseMessage = "Analyzing sessions (last 90 days)…"; const loader = new BorderedLoader(tui, theme, baseMessage); const startedAt = Date.now(); const progress: BreakdownProgressState = { phase: "scan", foundFiles: 0, parsedFiles: 0, totalFiles: 0, currentFile: undefined, }; const renderMessage = (): string => { const elapsed = ((Date.now() - startedAt) / 1000).toFixed(1); if (progress.phase === "scan") { return `${baseMessage} scanning (${formatCount(progress.foundFiles)} files) · ${elapsed}s`; } if (progress.phase === "parse") { return `${baseMessage} parsing (${formatCount(progress.parsedFiles)}/${formatCount(progress.totalFiles)}) · ${elapsed}s`; } return `${baseMessage} finalizing · ${elapsed}s`; }; let intervalId: NodeJS.Timeout | null = null; const stopTicker = () => { if (intervalId) { clearInterval(intervalId); intervalId = null; } }; // Update every 0.5s so long-running scans show some visible progress. setBorderedLoaderMessage(loader, renderMessage()); intervalId = setInterval(() => { setBorderedLoaderMessage(loader, renderMessage()); }, 500); loader.onAbort = () => { aborted = true; stopTicker(); done(null); }; computeBreakdown(loader.signal, (update) => Object.assign(progress, update)) .then((d) => { stopTicker(); if (!aborted) done(d); }) .catch((err) => { stopTicker(); console.error("session-breakdown: failed to analyze sessions", err); if (!aborted) done(null); }); return loader; }); if (!data) { ctx.ui.notify(aborted ? "Cancelled" : "Failed to analyze sessions", aborted ? 
"info" : "error"); return; } await ctx.ui.custom((tui, _theme, _kb, done) => { return new BreakdownComponent(data, tui, done); }); }, }); } ================================================ FILE: agents/pi/extensions/webfetch.ts ================================================ /** * WebFetch Extension * * Fetches web content and converts to markdown, text, or HTML for agent consumption. * Use when URLs are mentioned in conversation to retrieve their content. */ import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; import { Type } from "@sinclair/typebox"; import { StringEnum } from "@mariozechner/pi-ai"; import TurndownService from "turndown"; const MAX_RESPONSE_SIZE = 5 * 1024 * 1024; // 5MB const DEFAULT_TIMEOUT = 30 * 1000; // 30 seconds const MAX_TIMEOUT = 120 * 1000; // 2 minutes // Turndown instance for HTML to Markdown conversion const turndownService = new TurndownService({ headingStyle: "atx", hr: "---", bulletListMarker: "-", codeBlockStyle: "fenced", emDelimiter: "*", }); // Remove script/style/meta/link/media tags turndownService.remove([ "script", "style", "meta", "link", "noscript", "img", "svg", "picture", "figure", "canvas", "video", "audio", "source", "track", "embed", "object", "iframe", ]); export default function (pi: ExtensionAPI) { pi.registerTool({ name: "webfetch", label: "WebFetch", description: "Fetch web content and convert to markdown, text, or HTML for analysis. " + "Use when URLs are mentioned in conversation to retrieve their content. " + "HTTP URLs are automatically upgraded to HTTPS. 
" + "Images and binary content return empty text.", parameters: Type.Object({ url: Type.String({ description: "The URL to fetch content from" }), format: Type.Optional( StringEnum(["markdown", "text", "html"] as const, { description: "Output format: markdown (default), text, or html", }), ), timeout: Type.Optional( Type.Number({ description: "Timeout in seconds (max 120, default 30)", }), ), }), async execute(_toolCallId, params, signal, _onUpdate, ctx) { const format = params.format ?? "markdown"; let url = params.url; // Validate and normalize URL if (url.startsWith("http://")) { url = url.replace("http://", "https://"); } if (!url.startsWith("https://")) { ctx.ui.notify("Invalid URL: must start with http:// or https://", "error"); return { content: [{ type: "text", text: "" }], details: { url: params.url, format, error: "Invalid URL" }, }; } const timeout = Math.min( (params.timeout ?? DEFAULT_TIMEOUT / 1000) * 1000, MAX_TIMEOUT, ); // Create abort controller that combines signal and timeout const controller = new AbortController(); const timeoutId = setTimeout(() => controller.abort(), timeout); // Link external signal if provided if (signal) { signal.addEventListener("abort", () => controller.abort()); } try { const response = await fetch(url, { signal: controller.signal, headers: { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", Accept: "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "Accept-Language": "en-US,en;q=0.9", }, }); clearTimeout(timeoutId); if (!response.ok) { ctx.ui.notify(`HTTP error ${response.status}: ${response.statusText}`, "error"); return { content: [{ type: "text", text: "" }], details: { url, format, error: `HTTP ${response.status}`, }, }; } // Check content length header const contentLength = response.headers.get("content-length"); if (contentLength && parseInt(contentLength) > MAX_RESPONSE_SIZE) { ctx.ui.notify("Response too large 
(>5MB)", "error"); return { content: [{ type: "text", text: "" }], details: { url, format, error: "Response too large" }, }; } const contentType = response.headers.get("content-type") || ""; const mime = contentType.split(";")[0]?.trim().toLowerCase() || ""; // Skip binary/image content - return empty for agent if ( mime.startsWith("image/") || mime.startsWith("video/") || mime.startsWith("audio/") || mime.startsWith("application/pdf") || mime.startsWith("application/zip") || mime.startsWith("application/octet-stream") ) { return { content: [{ type: "text", text: "" }], details: { url, format, contentType: mime, skipped: true }, }; } const arrayBuffer = await response.arrayBuffer(); if (arrayBuffer.byteLength > MAX_RESPONSE_SIZE) { ctx.ui.notify("Response too large (>5MB)", "error"); return { content: [{ type: "text", text: "" }], details: { url, format, error: "Response too large" }, }; } let text = new TextDecoder().decode(arrayBuffer); if (mime.includes("text/html")) { text = stripMediaTags(text); } // Process based on format let output = ""; switch (format) { case "markdown": { if (mime.includes("text/html")) { output = turndownService.turndown(text); } else { output = text; } break; } case "text": { if (mime.includes("text/html")) { output = htmlToText(text); } else { output = text; } break; } case "html": { output = text; break; } } // Clean up whitespace output = output.trim(); return { content: [{ type: "text", text: output }], details: { url, format, contentType: mime }, }; } catch (error) { clearTimeout(timeoutId); if (error instanceof Error) { if (error.name === "AbortError") { ctx.ui.notify("Request timed out or was cancelled", "error"); return { content: [{ type: "text", text: "" }], details: { url, format, error: "Timeout or cancelled" }, }; } ctx.ui.notify(`Fetch error: ${error.message}`, "error"); return { content: [{ type: "text", text: "" }], details: { url, format, error: error.message }, }; } ctx.ui.notify("Unknown error occurred", "error"); 
return { content: [{ type: "text", text: "" }], details: { url, format, error: "Unknown error" }, }; } }, }); } // Strip media and embed tags from raw HTML before conversion function stripMediaTags(html: string): string { return ( html .replace(/]*>[\s\S]*?<\/script>/gi, "") .replace(/]*>[\s\S]*?<\/style>/gi, "") .replace(//gi, "") .replace(/]*>/gi, "") .replace(//gi, "") .replace(//gi, "") .replace(//gi, "") .replace(//gi, "") .replace(//gi, "") .replace(//gi, "") .replace(//gi, "") .replace(/]*>/gi, "") .replace(/]*>/gi, "") .replace(/]*>/gi, "") ); } // Simple HTML to text conversion (no turndown for text format) function htmlToText(html: string): string { return ( html // Replace common block elements with newlines .replace(/<\/p>/gi, "\n\n") .replace(//gi, "\n") .replace(/<\/div>/gi, "\n") .replace(/<\/h[1-6]>/gi, "\n\n") .replace(/<\/li>/gi, "\n") // Remove all remaining tags .replace(/<[^>]+>/g, "") // Decode common HTML entities .replace(/ /g, " ") .replace(/&/g, "&") .replace(/</g, "<") .replace(/>/g, ">") .replace(/"/g, '"') .replace(/'/g, "'") // Clean up whitespace .replace(/\n{3,}/g, "\n\n") .trim() ); } ================================================ FILE: agents/pi/extensions/whimsical-timer.ts ================================================ import type { AssistantMessage } from "@mariozechner/pi-ai"; import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; const messages = [ "Schlepping...", "Combobulating...", "Doing...", "Channelling...", "Vibing...", "Concocting...", "Spelunking...", "Transmuting...", "Imagining...", "Pontificating...", "Whirring...", "Cogitating...", "Honking...", "Noodling...", "Percolating...", "Ruminating...", "Simmering...", "Marinating...", "Fermenting...", "Gestating...", "Hatching...", "Brewing...", "Steeping...", "Contemplating...", "Musing...", "Pondering...", "Dithering...", "Faffing...", "Puttering...", "Tinkering...", "Wrangling...", "Discombobulating...", "Recombobulating...", "Befuddling...", 
"Snorkeling...", "Yodeling...", "Zigzagging...", "Somersaulting...", "Canoodling...", "Schmoozing...", "Skedaddling...", "Scampering...", "Swashbuckling...", "Effervescing...", "Bubbling...", "Enchanting...", "Mesmerizing...", "Sparkling...", "Scintillating...", "Synthesizing...", "Procrastinating...", "Dillydallying...", "Lollygagging...", "Sleuthing...", "Rummaging...", "Foraging...", "Vamoosing...", "Absconding...", "Jamming...", "Freestyling...", "Frolicking...", "Wibbling...", "Wobbling...", "Bonking...", "Squelching...", "Burbling...", "Whooshing...", "Clunking...", "Rustling...", "Bustling...", "Doodling...", "Squiggling...", "Slithering...", "Bouldering...", "Tottering...", "Jittering...", "Twittering...", "Chattering...", "Splattering...", "Hammering...", "Stammering...", "Shimmering...", "Glimmering...", "Skimming...", "Drumming...", "Fumbling...", "Grumbling...", "Mumbling...", "Rumbling...", "Stumbling...", "Crumbling...", "Tangling...", "Jangling...", "Mingling...", "Jingling...", "Bribing the compiler...", "Tickling the stack...", "Massaging the heap...", "Summoning semicolons...", "Herding pointers...", "Untangling spaghetti...", "Polishing the algorithms...", "Waxing philosophical...", "Reading tea leaves...", "Warming up the hamsters...", "Caffeinating...", "Squinting at the problem...", "Staring into the abyss...", "Communing with the machine spirit...", "Reticulating splines...", "Calibrating the flux capacitor...", ]; let agentStartMs: number | null = null; let intervalMs: number | null = null; let timerInterval: ReturnType | null = null; let currentMessage: string | null = null; function pickRandom(): string { return messages[Math.floor(Math.random() * messages.length)]; } function formatDuration(ms: number): string { const seconds = Math.floor(ms / 1000); const mins = Math.floor(seconds / 60); const secs = seconds % 60; if (mins > 0) return String(mins) + "m " + String(secs) + "s"; return String(secs) + "s"; } function 
isAssistantMessage(message: unknown): message is AssistantMessage { if (!message || typeof message !== "object") return false; return (message as { role?: unknown }).role === "assistant"; } export default function (pi: ExtensionAPI) { pi.on("agent_start", () => { agentStartMs = Date.now(); }); pi.on("turn_start", async (_event, ctx) => { intervalMs = Date.now(); currentMessage = pickRandom(); ctx.ui.setWorkingMessage(currentMessage); timerInterval = setInterval(() => { if (intervalMs === null || currentMessage === null) return; const elapsedMs = Date.now() - intervalMs; ctx.ui.setWorkingMessage(currentMessage + " (" + formatDuration(elapsedMs) + ")"); }, 1000); }); pi.on("turn_end", async (_event, ctx) => { currentMessage = null; ctx.ui.setWorkingMessage(); }); pi.on("agent_end", (event, ctx) => { if (!ctx.hasUI) return; if (agentStartMs === null) return; intervalMs = null; if (timerInterval !== null) { clearInterval(timerInterval); timerInterval = null; } const elapsedMs = Date.now() - agentStartMs; agentStartMs = null; if (elapsedMs <= 0) return; let output = 0; for (const msg of event.messages) { if (!isAssistantMessage(msg)) continue; output += msg.usage.output || 0; } if (output <= 0) return; const tps = (output / (elapsedMs / 1000)).toFixed(1); const msg = "TPS: " + tps + " tok/s took " + formatDuration(elapsedMs); ctx.ui.notify(msg, "info"); }); } ================================================ FILE: agents/pi/mcp.json ================================================ { "mcpServers": { "exa": { "url": "https://mcp.exa.ai/mcp" }, "fff": { "command": "fff-mcp", "directTools": true, "lifecycle": "keep-alive" } } } ================================================ FILE: agents/pi/models.json ================================================ { "providers": { "openrouter": { "modelOverrides": { "minimax/minimax-m2.5": { "name": "Minimax M2.5 RR", "compat": { "openRouterRouting": { "only": ["sambanova", "fireworks", "friendli"] } } }, "moonshotai/kimi-k2.5": { 
"name": "Kimi K2.5 RR", "compat": { "openRouterRouting": { "only": ["fireworks", "together"] } } }, "z-ai/glm-5": { "name": "GLM 5 RR", "compat": { "openRouterRouting": { "only": ["together", "friendli"] } } } } }, "opencode-go": { "apiKey": "!pass show opencode-go", "baseUrl": "https://opencode.ai/zen/go/v1", "models": [ { "id": "minimax-m2.7", "name": "MiniMax M2.7", "api": "anthropic-messages", "provider": "opencode-go", "baseUrl": "https://opencode.ai/zen/go", "reasoning": true, "input": ["text"], "cost": { "input": 0.3, "output": 1.2, "cacheRead": 0.03, "cacheWrite": 0 }, "contextWindow": 204800, "maxTokens": 131072 }, { "id": "glm-5", "name": "GLM 5", "api": "openai-completions", "provider": "opencode-go", "reasoning": true, "input": ["text"], "cost": { "input": 1, "output": 3.2, "cacheRead": 0.2, "cacheWrite": 0 }, "contextWindow": 204800, "maxTokens": 131072 }, { "id": "kimi-k2.5", "name": "Kimi K2.5", "api": "openai-completions", "provider": "opencode-go", "reasoning": true, "input": ["text", "image"], "cost": { "input": 0.6, "output": 3.0, "cacheRead": 0.1, "cacheWrite": 0 }, "contextWindow": 256000, "maxTokens": 256000 } ] } } } ================================================ FILE: agents/pi/package.json ================================================ { "name": "nil", "peerDependencies": { "@mariozechner/pi-ai": "*", "@mariozechner/pi-coding-agent": "*", "@mariozechner/pi-tui": "*", "@sinclair/typebox": "*", "turndown": "^7.2.2", "koffi": "^2.11.0" }, "private": true, "type": "module" } ================================================ FILE: agents/pi/settings.json ================================================ { "lastChangelogVersion": "0.70.6", "defaultProvider": "deepseek", "defaultModel": "deepseek-v4-flash", "defaultThinkingLevel": "minimal", "theme": "rose-pine-dawn", "quietStartup": true, "packages": [ "./extensions/pi-codemode", "npm:@plannotator/pi-extension", "git:github.com/SamuelLHuber/pi-fff", "npm:pi-boomerang" ], "editorPaddingX": 0, 
"autocompleteMaxVisible": 10, "transport": "auto", "doubleEscapeAction": "tree", "enabledModels": [], "hideThinkingBlock": false } ================================================ FILE: agents/pi/themes/rose-pine-dawn.json ================================================ { "$schema": "https://raw.githubusercontent.com/badlogic/pi-mono/main/packages/coding-agent/src/modes/interactive/theme/theme-schema.json", "name": "rose-pine-dawn", "vars": { "base": "#faf4ed", "surface": "#fffaf3", "overlay": "#f2e9e1", "muted": "#9893a5", "subtle": "#797593", "text": "#575279", "love": "#b4637a", "gold": "#ea9d34", "rose": "#d7827e", "pine": "#286983", "foam": "#56949f", "iris": "#907aa9" }, "colors": { "accent": "iris", "border": "subtle", "borderAccent": "pine", "borderMuted": "overlay", "success": "pine", "error": "love", "warning": "gold", "muted": "muted", "dim": "subtle", "text": "text", "thinkingText": "muted", "selectedBg": "overlay", "userMessageBg": "surface", "userMessageText": "text", "customMessageBg": "overlay", "customMessageText": "text", "customMessageLabel": "iris", "toolPendingBg": "overlay", "toolSuccessBg": "surface", "toolErrorBg": "overlay", "toolTitle": "iris", "toolOutput": "muted", "mdHeading": "rose", "mdLink": "foam", "mdLinkUrl": "subtle", "mdCode": "iris", "mdCodeBlock": "text", "mdCodeBlockBorder": "overlay", "mdQuote": "muted", "mdQuoteBorder": "subtle", "mdHr": "overlay", "mdListBullet": "rose", "toolDiffAdded": "pine", "toolDiffRemoved": "love", "toolDiffContext": "muted", "syntaxComment": "muted", "syntaxKeyword": "pine", "syntaxFunction": "rose", "syntaxVariable": "gold", "syntaxString": "foam", "syntaxNumber": "gold", "syntaxType": "iris", "syntaxOperator": "pine", "syntaxPunctuation": "subtle", "thinkingOff": "muted", "thinkingMinimal": "subtle", "thinkingLow": "pine", "thinkingMedium": "foam", "thinkingHigh": "iris", "thinkingXhigh": "love", "bashMode": "pine" } } ================================================ FILE: 
agents/pi/treesitter.ts ================================================ import path from "node:path"; import fs from "node:fs/promises"; import { createWriteStream } from "node:fs"; import os from "node:os"; import https from "node:https"; import Parser from "tree-sitter"; import { createPicker } from "../create-picker"; import type { PickerContext } from "../types"; interface ParserConfig { /** Language identifier (e.g., "typescript", "rust") */ language: string; /** File extensions to scan (e.g., [".ts", ".tsx"]) */ extensions: string[]; /** URL to download the parser WASM */ url?: string; /** Optional local path to parser (instead of URL) */ localPath?: string; } interface Symbol { name: string; kind: string; file: string; line: number; column: number; } /** Simple LRU cache with max size limit */ class LRUCache { private cache = new Map(); private maxSize: number; constructor(maxSize: number) { this.maxSize = maxSize; } get(key: K): V | undefined { const value = this.cache.get(key); if (value !== undefined) { // Move to end (most recently used) this.cache.delete(key); this.cache.set(key, value); } return value; } set(key: K, value: V): void { if (this.cache.has(key)) { this.cache.delete(key); } else if (this.cache.size >= this.maxSize) { // Remove least recently used (first item) const firstKey = this.cache.keys().next().value; if (firstKey !== undefined) { this.cache.delete(firstKey); } } this.cache.set(key, value); } has(key: K): boolean { return this.cache.has(key); } clear(): void { this.cache.clear(); } get size(): number { return this.cache.size; } } // Cache for parsed symbols per language - limited to 10 entries to prevent unbounded growth const symbolCache = new LRUCache(10); const runtimeCache = new Map(); let parsersDir: string | null = null; let queriesDir: string | null = null; async function getParsersDir(): Promise { if (!parsersDir) { parsersDir = path.join(os.homedir(), ".pi", "agent", "parsers"); await fs.mkdir(parsersDir, { recursive: true 
}); } return parsersDir; } async function getQueriesDir(): Promise { if (!queriesDir) { queriesDir = path.join(os.homedir(), ".pi", "agent", "queries"); await fs.mkdir(queriesDir, { recursive: true }); } return queriesDir; } async function downloadFile(url: string, dest: string): Promise { return new Promise((resolve) => { const file = createWriteStream(dest); https .get(url, (res) => { if (res.statusCode && res.statusCode >= 300 && res.statusCode < 400 && res.headers.location) { downloadFile(res.headers.location, dest).then(resolve); return; } if (res.statusCode !== 200) { resolve(false); return; } res.pipe(file); file.on("finish", () => { file.close(() => resolve(true)); }); }) .on("error", () => { fs.unlink(dest).catch(() => {}); resolve(false); }); }); } async function ensureParser(config: ParserConfig): Promise { if (config.localPath) { try { await fs.access(config.localPath); return config.localPath; } catch { // local path doesn't exist, continue } } if (!config.url) { return null; } const parsersDir = await getParsersDir(); const fileName = `${config.language}.wasm`; const destPath = path.join(parsersDir, fileName); try { await fs.access(destPath); return destPath; } catch { // doesn't exist, download it } const success = await downloadFile(config.url, destPath); return success ? 
destPath : null; } async function walkDir(dir: string, extensions: string[], files: string[] = []): Promise { try { const entries = await fs.readdir(dir, { withFileTypes: true }); for (const entry of entries) { const fullPath = path.join(dir, entry.name); if (entry.isDirectory()) { if (entry.name === "node_modules" || entry.name === ".git" || entry.name.startsWith(".")) { continue; } await walkDir(fullPath, extensions, files); } else if (entry.isFile()) { const ext = path.extname(entry.name); if (extensions.includes(ext)) { files.push(fullPath); } } } } catch (err) { console.error("[pi-ckers] Error walking directory:", err); } return files; } async function findFiles(cwd: string, extensions: string[]): Promise { return walkDir(cwd, extensions); } async function readQueryFile(filePath: string): Promise { try { return await fs.readFile(filePath, "utf8"); } catch { return null; } } /** Check if a path is a directory */ async function isDirectory(filePath: string): Promise { try { const stat = await fs.stat(filePath); return stat.isDirectory(); } catch { return false; } } /** Default discovery: looks for {language}.scm in search paths, then default dir */ async function defaultDiscoverQuery( language: string, searchPaths: string[], defaultDir: string ): Promise { // Search custom paths first (priority order) for (const searchPath of searchPaths) { const isDir = await isDirectory(searchPath); const queryFile = isDir ? 
path.join(searchPath, `${language}.scm`) : searchPath; const content = await readQueryFile(queryFile); if (content) return content; } // Fallback to default queries dir const queryFile = path.join(defaultDir, `${language}.scm`); return readQueryFile(queryFile); } async function loadQuery( language: string, queriesDirs: string[] | QueryDiscovery | undefined ): Promise { const defaultDir = await getQueriesDir(); // If it's a function, use it as custom discovery if (typeof queriesDirs === "function") { return queriesDirs({ language, defaultDir, readFile: readQueryFile, }); } // Otherwise use default discovery with array of paths return defaultDiscoverQuery(language, queriesDirs ?? [], defaultDir); } async function parseFileWithTreesitter( filePath: string, runtime: { parser: Parser; query: Parser.Query }, ): Promise { const symbols: Symbol[] = []; try { const sourceCode = await fs.readFile(filePath, "utf8"); const tree = runtime.parser.parse(sourceCode); const matches = runtime.query.matches(tree.rootNode); for (const match of matches) { for (const capture of match.captures) { const node = capture.node; const captureName = capture.name; symbols.push({ name: node.text, kind: captureName, file: filePath, line: node.startPosition.row + 1, column: node.startPosition.column, }); } } } catch (err) { // If tree-sitter fails, return empty console.error(`[pi-ckers] Error parsing file ${filePath}:`, err); } return symbols; } async function parseWorkspace( config: ParserConfig, cwd: string, maxFiles: number, notify: (msg: string, type: "info" | "error") => void, queriesDirs: string[] | QueryDiscovery | undefined ): Promise { const cacheKey = `${config.language}:${cwd}`; if (symbolCache.has(cacheKey)) { return symbolCache.get(cacheKey)!; } // Load query file - required const queryText = await loadQuery(config.language, queriesDirs); if (!queryText) { // No query file, no symbols return []; } // Ensure parser is available const parserPath = await ensureParser(config); if 
(!parserPath) { notify(`Tree-sitter parser not available for ${config.language}`, "error"); return []; } const files = await findFiles(cwd, config.extensions); const allSymbols: Symbol[] = []; let runtime = runtimeCache.get(config.language); if (!runtime) { const parser = new Parser(); const loadedLanguage = await Parser.Language.load(parserPath); parser.setLanguage(loadedLanguage); runtime = { parser, query: loadedLanguage.query(queryText), }; runtimeCache.set(config.language, runtime); } for (const file of files.slice(0, maxFiles)) { const symbols = await parseFileWithTreesitter(file, runtime); allSymbols.push(...symbols); } symbolCache.set(cacheKey, allSymbols); return allSymbols; } function filterSymbols(symbols: Symbol[], query: string): Symbol[] { const lowerQuery = query.toLowerCase(); return symbols .filter((s) => s.name.toLowerCase().includes(lowerQuery)) .slice(0, 20); } function formatSymbolValue(symbol: Symbol, isQuotedPrefix: boolean): string { const fileName = path.basename(symbol.file); const location = `${fileName}:${symbol.line}`; const value = `${symbol.name} (${location})`; if (!isQuotedPrefix && !value.includes(" ")) { return `@ts:${value}`; } return `@ts:"${value}"`; } export interface QueryDiscoveryContext { /** Language identifier (e.g., "typescript") */ language: string; /** Default fallback directory (~/.pi/agent/queries) */ defaultDir: string; /** Read file helper - returns null if not found */ readFile: (filePath: string) => Promise; } /** Custom function to discover query files. Return the query text or null. 
*/ export type QueryDiscovery = (ctx: QueryDiscoveryContext) => Promise; export interface TreesitterPickerOptions { /** Parser configurations for different languages */ parsers: ParserConfig[]; /** Maximum number of files to scan per language (default: 100) */ maxFiles?: number; /** Refresh interval in milliseconds (default: 60000) */ refreshInterval?: number; /** Directories or files to search for query files, or a function that returns them. * * - string[]: Array of directories or file paths to search. For directories, * looks for {language}.scm. For files, reads them directly. * - function: Custom discovery that returns the query text directly. * * Falls back to ~/.pi/agent/queries/{language}.scm if not found. */ queriesDirs?: string[] | QueryDiscovery; } /** * Create a treesitter workspace symbols picker for @ts: completions. * * Query files are loaded from ~/.pi/agent/queries/{language}.scm by default. * * Use queriesDirs to customize: * - string[]: Directories or specific files to search * - function: Custom discovery logic returning query text directly * * Parsers are downloaded on-demand to ~/.pi/agent/parsers/ * * @example * ```typescript * import { tsWorkspaceSymbolsPicker } from "@elianiva/pi-ckers/builtin/treesitter"; * import path from "node:path"; * * // Simple: Custom directories (looks for {language}.scm in each) * const picker = tsWorkspaceSymbolsPicker({ * parsers: [...], * queriesDirs: ["/home/user/.config/nvim/queries"] * }); * * // Advanced: Custom discovery for nvim-treesitter structure * const nvimPicker = tsWorkspaceSymbolsPicker({ * parsers: [...], * queriesDirs: async ({ language, defaultDir, readFile }) => { * // Look for locals.scm in nvim-treesitter structure * const nvimPath = path.join( * "/Users/elianiva/.local/share/nvim/lazy/nvim-treesitter/queries", * language, * "locals.scm" * ); * const content = await readFile(nvimPath); * if (content) return content; * * // Fallback to default * return readFile(path.join(defaultDir, 
`${language}.scm`)); * } * }); * ``` */ export const tsWorkspaceSymbolsPicker = (options: TreesitterPickerOptions) => { const maxFiles = options.maxFiles ?? 100; const refreshInterval = options.refreshInterval ?? 60000; const queriesDirs = options.queriesDirs; let refreshTimer: ReturnType | null = null; let notify: (msg: string, type: "info" | "error") => void = console.error; let currentCwd: string = ""; return createPicker({ type: "sync", prefix: "@ts:", minQueryLength: 1, init: async (ctx) => { currentCwd = ctx.cwd; notify = ctx.ui.notify.bind(ctx.ui); await getQueriesDir(); await getParsersDir(); for (const config of options.parsers) { await parseWorkspace(config, ctx.cwd, maxFiles, notify, queriesDirs); } if (refreshTimer) { clearInterval(refreshTimer); } refreshTimer = setInterval(async () => { if (!currentCwd) return; symbolCache.clear(); for (const config of options.parsers) { await parseWorkspace(config, currentCwd, maxFiles, notify, queriesDirs); } }, refreshInterval); }, search: (query: string, ctx: PickerContext) => { const allSymbols: Symbol[] = []; for (const config of options.parsers) { const cacheKey = `${config.language}:${ctx.cwd}`; const symbols = symbolCache.get(cacheKey); if (symbols) { allSymbols.push(...symbols); } } if (allSymbols.length === 0) { return null; } const filtered = filterSymbols(allSymbols, query); if (filtered.length === 0) { return null; } return filtered.map((symbol) => { const fileName = path.basename(symbol.file); return { value: formatSymbolValue(symbol, ctx.isQuotedPrefix), label: symbol.name, description: `${symbol.kind} · ${fileName}:${symbol.line}`, }; }); }, clearCache: () => { symbolCache.clear(); runtimeCache.clear(); }, destroy: () => { symbolCache.clear(); runtimeCache.clear(); if (refreshTimer) { clearInterval(refreshTimer); refreshTimer = null; } currentCwd = ""; }, }); }; ================================================ FILE: agents/skills/build-feature/SKILL.md ================================================ 
--- name: build-feature description: Use when creating or developing a feature, this contains focused instructions for building features. --- ## Rules - Make sure you're certain about all ambiguities, otherwise ask for clarification - If there's a plan, follow it and ask for clarification if needed - Follow the Tracer Bullets to build a small, focused, end-to-end slice of the feature ## Tracer Bullets When building features, build a tiny, end-to-end slice of the feature first, seek feedback, then expand out from there. Tracer bullets come from The Pragmatic Programmer. When building systems, you want to write code that gets you feedback as quickly as possible. Tracer bullets are small slices of functionality that go through all layers of the system, allowing you to test and validate your approach early. This helps in identifying potential issues and ensures that the overall architecture is sound before investing significant time in development. ================================================ FILE: agents/skills/code-review/SKILL.md ================================================ --- name: code-review version: 1.0.0 description: Code review entire project --- # Code Review Act as a highly critical and analytical code reviewer. ## Process - Provide detailed feedback on code quality, correctness, and maintainability - Offer suggestions for improvements and potential alternatives - Ensure code is readable, maintainable, and efficient $ARGUMENTS ================================================ FILE: agents/skills/debugging/SKILL.md ================================================ --- name: debugging description: Use when encountering any bug, test failure, or unexpected behavior, before proposing fixes - four-phase framework (root cause investigation, pattern analysis, hypothesis testing, implementation) that ensures understanding before attempting solutions --- # Systematic Debugging ## Overview Random fixes waste time and create new bugs.
Quick patches mask underlying issues. **Core principle:** ALWAYS find root cause before attempting fixes. Symptom fixes are failure. **Violating the letter of this process is violating the spirit of debugging.** ## The Iron Law ``` NO FIXES WITHOUT ROOT CAUSE INVESTIGATION FIRST ``` If you haven't completed Phase 1, you cannot propose fixes. ## When to Use Use for ANY technical issue: - Test failures - Bugs in production - Unexpected behavior - Performance problems - Build failures - Integration issues **Use this ESPECIALLY when:** - Under time pressure (emergencies make guessing tempting) - "Just one quick fix" seems obvious - You've already tried multiple fixes - Previous fix didn't work - You don't fully understand the issue **Don't skip when:** - Issue seems simple (simple bugs have root causes too) - You're in a hurry (rushing guarantees rework) - Manager wants it fixed NOW (systematic is faster than thrashing) ## The Four Phases You MUST complete each phase before proceeding to the next. ### Phase 1: Root Cause Investigation **BEFORE attempting ANY fix:** 1. **Read Error Messages Carefully** - Don't skip past errors or warnings - They often contain the exact solution - Read stack traces completely - Note line numbers, file paths, error codes 2. **Reproduce Consistently** - Can you trigger it reliably? - What are the exact steps? - Does it happen every time? - If not reproducible → gather more data, don't guess 3. **Check Recent Changes** - What changed that could cause this? - Git diff, recent commits - New dependencies, config changes - Environmental differences 4. 
**Gather Evidence in Multi-Component Systems** **WHEN system has multiple components (CI → build → signing, API → service → database):** **BEFORE proposing fixes, add diagnostic instrumentation:** ``` For EACH component boundary: - Log what data enters component - Log what data exits component - Verify environment/config propagation - Check state at each layer Run once to gather evidence showing WHERE it breaks THEN analyze evidence to identify failing component THEN investigate that specific component ``` **Example (multi-layer system):** ```bash # Layer 1: Workflow echo "=== Secrets available in workflow: ===" echo "IDENTITY: ${IDENTITY:+SET}${IDENTITY:-UNSET}" # Layer 2: Build script echo "=== Env vars in build script: ===" env | grep IDENTITY || echo "IDENTITY not in environment" # Layer 3: Signing script echo "=== Keychain state: ===" security list-keychains security find-identity -v # Layer 4: Actual signing codesign --sign "$IDENTITY" --verbose=4 "$APP" ``` **This reveals:** Which layer fails (secrets → workflow ✓, workflow → build ✗) 5. **Trace Data Flow** **WHEN error is deep in call stack:** **REQUIRED SUB-SKILL:** Use superpowers root-cause-tracing for backward tracing technique **Quick version:** - Where does bad value originate? - What called this with bad value? - Keep tracing up until you find the source - Fix at source, not at symptom ### Phase 2: Pattern Analysis **Find the pattern before fixing:** 1. **Find Working Examples** - Locate similar working code in same codebase - What works that's similar to what's broken? 2. **Compare Against References** - If implementing pattern, read reference implementation COMPLETELY - Don't skim - read every line - Understand the pattern fully before applying 3. **Identify Differences** - What's different between working and broken? - List every difference, however small - Don't assume "that can't matter" 4. **Understand Dependencies** - What other components does this need? - What settings, config, environment? 
- What assumptions does it make? ### Phase 3: Hypothesis and Testing **Scientific method:** 1. **Form Single Hypothesis** - State clearly: "I think X is the root cause because Y" - Write it down - Be specific, not vague 2. **Test Minimally** - Make the SMALLEST possible change to test hypothesis - One variable at a time - Don't fix multiple things at once 3. **Verify Before Continuing** - Did it work? Yes → Phase 4 - Didn't work? Form NEW hypothesis - DON'T add more fixes on top 4. **When You Don't Know** - Say "I don't understand X" - Don't pretend to know - Ask for help - Research more ### Phase 4: Implementation **Fix the root cause, not the symptom:** 1. **Decide on Testing Strategy** **Auto-decide based on complexity:** - **Write test for**: Complex algorithms, business logic, data transformations where bugs are likely - **Skip test for**: UI components, React hooks, simple CRUD, straightforward mappings, anything you're 100% certain is correct - **Test type**: Only deterministic unit tests - no integration tests, no complex mocking, no async complexity **If writing test:** - Simplest possible reproduction - Automated test that fails before fix - Verify logic, not implementation details **If skipping test:** - Verify fix with typecheck/lint - Manual verification for UI changes - Code review confidence that fix is correct 2. **Implement Single Fix** - Address the root cause identified - ONE change at a time - No "while I'm here" improvements - No bundled refactoring 3. **Verify Fix** **If test was written:** - Test passes now? - No other tests broken? **If no test:** - Typecheck passes? - Lint clean? - Manual verification confirms fix? **Always check:** - Issue actually resolved? - No regressions in related functionality? 4. **If Fix Doesn't Work** - STOP - Count: How many fixes have you tried? 
- If < 3: Return to Phase 1, re-analyze with new information - **If ≥ 3: STOP and question the architecture (step 5 below)** - DON'T attempt Fix #4 without architectural discussion 5. **If 3+ Fixes Failed: Question Architecture** **Pattern indicating architectural problem:** - Each fix reveals new shared state/coupling/problem in different place - Fixes require "massive refactoring" to implement - Each fix creates new symptoms elsewhere **STOP and question fundamentals:** - Is this pattern fundamentally sound? - Are we "sticking with it through sheer inertia"? - Should we refactor architecture vs. continue fixing symptoms? **Discuss with your human partner before attempting more fixes** This is NOT a failed hypothesis - this is a wrong architecture. ## Red Flags - STOP and Follow Process If you catch yourself thinking: - "Quick fix for now, investigate later" - "Just try changing X and see if it works" - "Add multiple changes, run tests" - "It's probably X, let me fix that" - "I don't fully understand but this might work" - "Pattern says X but I'll adapt it differently" - "Here are the main problems: [lists fixes without investigation]" - Proposing solutions before tracing data flow - **"One more fix attempt" (when already tried 2+)** - **Each fix reveals new problem in different place** - **Writing tests for UI components when you're certain the fix is correct** **ALL of these mean: STOP. Return to Phase 1.** **If 3+ fixes failed:** Question the architecture (see Phase 4.5) ## your human partner's Signals You're Doing It Wrong **Watch for these redirections:** - "Is that not happening?" - You assumed without verifying - "Will it show us...?" - You should have added evidence gathering - "Stop guessing" - You're proposing fixes without understanding - "Ultrathink this" - Question fundamentals, not just symptoms - "We're stuck?" (frustrated) - Your approach isn't working **When you see these:** STOP. Return to Phase 1. 
## Common Rationalizations | Excuse | Reality | | -------------------------------------------- | ----------------------------------------------------------------------------- | | "Issue is simple, don't need process" | Simple issues have root causes too. Process is fast for simple bugs. | | "Emergency, no time for process" | Systematic debugging is FASTER than guess-and-check thrashing. | | "Just try this first, then investigate" | First fix sets the pattern. Do it right from the start. | | "Multiple fixes at once saves time" | Can't isolate what worked. Causes new bugs. | | "Reference too long, I'll adapt the pattern" | Partial understanding guarantees bugs. Read it completely. | | "I see the problem, let me fix it" | Seeing symptoms ≠ understanding root cause. | | "One more fix attempt" (after 2+ failures) | 3+ failures = architectural problem. Question pattern, don't fix again. | | "UI fix doesn't need tests" | Correct! UI components verified via typecheck/manual testing, not unit tests. | ## Quick Reference | Phase | Key Activities | Success Criteria | | --------------------- | ------------------------------------------------------ | --------------------------- | | **1. Root Cause** | Read errors, reproduce, check changes, gather evidence | Understand WHAT and WHY | | **2. Pattern** | Find working examples, compare | Identify differences | | **3. Hypothesis** | Form theory, test minimally | Confirmed or new hypothesis | | **4. Implementation** | Create test, fix, verify | Bug resolved, tests pass | ## When Process Reveals "No Root Cause" If systematic investigation reveals issue is truly environmental, timing-dependent, or external: 1. You've completed the process 2. Document what you investigated 3. Implement appropriate handling (retry, timeout, error message) 4. Add monitoring/logging for future investigation **But:** 95% of "no root cause" cases are incomplete investigation. 
## Integration with Other Skills **This skill requires using:** - **root-cause-tracing** - REQUIRED when error is deep in call stack (see Phase 1, Step 5) **Testing skills (when needed):** - **test-driven-development** (if available) - Use when fixing complex business logic that needs test coverage - Skip for UI components, simple CRUD, or anything verifiable via typecheck/manual testing ## Real-World Impact From debugging sessions: - Systematic approach: 15-30 minutes to fix - Random fixes approach: 2-3 hours of thrashing - First-time fix rate: 95% vs 40% - New bugs introduced: Near zero vs common ================================================ FILE: agents/skills/deslop/SKILL.md ================================================ --- name: deslop description: Remove all AI-generated slop like useless comments, try/catch blocks, and casts --- Remove all AI-generated slop, this includes: - Extra comments that a human wouldn't add or is inconsistent with the rest of the file, remove the what, keep the why - Extra defensive checks or try/catch blocks that are abnormal for that area of the codebase (especially if called by trusted / validated codepaths) - Casts to any to get around type issues - Any other style that is inconsistent with the file Report at the end with only a 1-3 sentence summary of what you changed ================================================ FILE: agents/skills/effect-best-practices/SKILL.md ================================================ --- name: effect-best-practices description: Enforces Effect-TS patterns for services, errors, layers, and atoms. Use when writing code with Effect.Service, Schema.TaggedError, Layer composition, or effect-atom React components. metadata: version: 1.0.0 --- # Effect-TS Best Practices This skill enforces opinionated, consistent patterns for Effect-TS codebases. These patterns optimize for type safety, testability, observability, and maintainability. 
## Quick Reference: Critical Rules | Category | DO | DON'T | |----------|-----|-------| | Services | `Effect.Service` with `accessors: true` | `Context.Tag` for business logic | | Dependencies | `dependencies: [Dep.Default]` in service | Manual `Layer.provide` at usage sites | | Errors | `Schema.TaggedError` with `message` field | Plain classes or generic Error | | Error Specificity | `UserNotFoundError`, `SessionExpiredError` | Generic `NotFoundError`, `BadRequestError` | | Error Handling | `catchTag`/`catchTags` | `catchAll` or `mapError` | | IDs | `Schema.UUID.pipe(Schema.brand("@App/EntityId"))` | Plain `string` for entity IDs | | Functions | `Effect.fn("Service.method")` | Anonymous generators | | Logging | `Effect.log` with structured data | `console.log` | | Config | `Config.*` with validation | `process.env` directly | | Options | `Option.match` with both cases | `Option.getOrThrow` | | Nullability | `Option` in domain types | `null`/`undefined` | | Atoms | `Atom.make` outside components | Creating atoms inside render | | Atom State | `Atom.keepAlive` for global state | Forgetting keepAlive for persistent state | | Atom Updates | `useAtomSet` in React components | `Atom.update` imperatively from React | | Atom Cleanup | `get.addFinalizer()` for side effects | Missing cleanup for event listeners | | Atom Results | `Result.builder` with `onErrorTag` | Ignoring loading/error states | ## Service Definition Pattern **Always use `Effect.Service`** for business logic services. This provides automatic accessors, built-in `Default` layer, and proper dependency declaration. 
```typescript import { Effect } from "effect" export class UserService extends Effect.Service()("UserService", { accessors: true, dependencies: [UserRepo.Default, CacheService.Default], effect: Effect.gen(function* () { const repo = yield* UserRepo const cache = yield* CacheService const findById = Effect.fn("UserService.findById")(function* (id: UserId) { const cached = yield* cache.get(id) if (Option.isSome(cached)) return cached.value const user = yield* repo.findById(id) yield* cache.set(id, user) return user }) const create = Effect.fn("UserService.create")(function* (data: CreateUserInput) { const user = yield* repo.create(data) yield* Effect.log("User created", { userId: user.id }) return user }) return { findById, create } }), }) {} // Usage - dependencies are already wired const program = Effect.gen(function* () { const user = yield* UserService.findById(userId) return user }) // At app root const MainLive = Layer.mergeAll(UserService.Default, OtherService.Default) ``` **When `Context.Tag` is acceptable:** - Infrastructure with runtime injection (Cloudflare KV, worker bindings) - Factory patterns where resources are provided externally See `references/service-patterns.md` for detailed patterns. ## Error Definition Pattern **Always use `Schema.TaggedError`** for errors. This makes them serializable (required for RPC) and provides consistent structure. 
```typescript import { Schema } from "effect" import { HttpApiSchema } from "@effect/platform" export class UserNotFoundError extends Schema.TaggedError()( "UserNotFoundError", { userId: UserId, message: Schema.String, }, HttpApiSchema.annotations({ status: 404 }), ) {} export class UserCreateError extends Schema.TaggedError()( "UserCreateError", { message: Schema.String, cause: Schema.optional(Schema.String), }, HttpApiSchema.annotations({ status: 400 }), ) {} ``` **Error handling - use `catchTag`/`catchTags`:** ```typescript // CORRECT - preserves type information yield* repo.findById(id).pipe( Effect.catchTag("DatabaseError", (err) => Effect.fail(new UserNotFoundError({ userId: id, message: "Lookup failed" })) ), Effect.catchTag("ConnectionError", (err) => Effect.fail(new ServiceUnavailableError({ message: "Database unreachable" })) ), ) // CORRECT - multiple tags at once yield* effect.pipe( Effect.catchTags({ DatabaseError: (err) => Effect.fail(new UserNotFoundError({ userId: id, message: err.message })), ValidationError: (err) => Effect.fail(new InvalidEmailError({ email: input.email, message: err.message })), }), ) ``` ### Prefer Explicit Over Generic Errors **Every distinct failure reason deserves its own error type.** Don't collapse multiple failure modes into generic HTTP errors. ```typescript // WRONG - Generic errors lose information export class NotFoundError extends Schema.TaggedError()( "NotFoundError", { message: Schema.String }, HttpApiSchema.annotations({ status: 404 }), ) {} // Then mapping everything to it: Effect.catchTags({ UserNotFoundError: (err) => Effect.fail(new NotFoundError({ message: "Not found" })), ChannelNotFoundError: (err) => Effect.fail(new NotFoundError({ message: "Not found" })), MessageNotFoundError: (err) => Effect.fail(new NotFoundError({ message: "Not found" })), }) // Frontend gets useless: { _tag: "NotFoundError", message: "Not found" } // Which resource? User? Channel? Message? Can't tell! 
``` ```typescript // CORRECT - Explicit domain errors with rich context export class UserNotFoundError extends Schema.TaggedError()( "UserNotFoundError", { userId: UserId, message: Schema.String }, HttpApiSchema.annotations({ status: 404 }), ) {} export class ChannelNotFoundError extends Schema.TaggedError()( "ChannelNotFoundError", { channelId: ChannelId, message: Schema.String }, HttpApiSchema.annotations({ status: 404 }), ) {} export class SessionExpiredError extends Schema.TaggedError()( "SessionExpiredError", { sessionId: SessionId, expiredAt: Schema.DateTimeUtc, message: Schema.String }, HttpApiSchema.annotations({ status: 401 }), ) {} // Frontend can now show specific UI: // - UserNotFoundError → "User doesn't exist" // - ChannelNotFoundError → "Channel was deleted" // - SessionExpiredError → "Your session expired. Please log in again." ``` See `references/error-patterns.md` for error remapping and retry patterns. ## Schema & Branded Types Pattern **Brand all entity IDs** for type safety across service boundaries: ```typescript import { Schema } from "effect" // Entity IDs - always branded export const UserId = Schema.UUID.pipe(Schema.brand("@App/UserId")) export type UserId = Schema.Schema.Type export const OrganizationId = Schema.UUID.pipe(Schema.brand("@App/OrganizationId")) export type OrganizationId = Schema.Schema.Type // Domain types - use Schema.Struct export const User = Schema.Struct({ id: UserId, email: Schema.String, name: Schema.String, organizationId: OrganizationId, createdAt: Schema.DateTimeUtc, }) export type User = Schema.Schema.Type // Input types for mutations export const CreateUserInput = Schema.Struct({ email: Schema.String.pipe(Schema.pattern(/^[^\s@]+@[^\s@]+\.[^\s@]+$/)), name: Schema.String.pipe(Schema.minLength(1)), organizationId: OrganizationId, }) export type CreateUserInput = Schema.Schema.Type ``` **When NOT to brand:** - Simple strings that don't cross service boundaries (URLs, file paths) - Primitive config values See 
`references/schema-patterns.md` for transforms and advanced patterns. ## Function Pattern with Effect.fn **Always use `Effect.fn`** for service methods. This provides automatic tracing with proper span names: ```typescript // CORRECT - Effect.fn with descriptive name const findById = Effect.fn("UserService.findById")(function* (id: UserId) { yield* Effect.annotateCurrentSpan("userId", id) const user = yield* repo.findById(id) return user }) // CORRECT - Effect.fn with multiple parameters const transfer = Effect.fn("AccountService.transfer")( function* (fromId: AccountId, toId: AccountId, amount: number) { yield* Effect.annotateCurrentSpan("fromId", fromId) yield* Effect.annotateCurrentSpan("toId", toId) yield* Effect.annotateCurrentSpan("amount", amount) // ... } ) ``` ## Layer Composition **Declare dependencies in the service**, not at usage sites: ```typescript // CORRECT - dependencies in service definition export class OrderService extends Effect.Service()("OrderService", { accessors: true, dependencies: [ UserService.Default, ProductService.Default, PaymentService.Default, ], effect: Effect.gen(function* () { const users = yield* UserService const products = yield* ProductService const payments = yield* PaymentService // ... }), }) {} // At app root - simple merge const AppLive = Layer.mergeAll( OrderService.Default, // Infrastructure layers (intentionally not in dependencies) DatabaseLive, RedisLive, ) ``` See `references/layer-patterns.md` for testing layers and config-dependent layers. ## Option Handling **Never use `Option.getOrThrow`**. 
Always handle both cases explicitly: ```typescript // CORRECT - explicit handling yield* Option.match(maybeUser, { onNone: () => Effect.fail(new UserNotFoundError({ userId, message: "Not found" })), onSome: (user) => Effect.succeed(user), }) // CORRECT - with getOrElse for defaults const name = Option.getOrElse(maybeName, () => "Anonymous") // CORRECT - Option.map for transformations const upperName = Option.map(maybeName, (n) => n.toUpperCase()) ``` ## Effect Atom (Frontend State) Effect Atom provides reactive state management for React with Effect integration. ### Basic Atoms ```typescript import { Atom } from "@effect-atom/atom-react" // Define atoms OUTSIDE components const countAtom = Atom.make(0) // Use keepAlive for global state that should persist const userPrefsAtom = Atom.make({ theme: "dark" }).pipe(Atom.keepAlive) // Atom families for per-entity state const modalAtomFamily = Atom.family((type: string) => Atom.make({ isOpen: false }).pipe(Atom.keepAlive) ) ``` ### React Integration ```typescript import { useAtomValue, useAtomSet, useAtom, useAtomMount } from "@effect-atom/atom-react" function Counter() { const count = useAtomValue(countAtom) // Read only const setCount = useAtomSet(countAtom) // Write only const [value, setValue] = useAtom(countAtom) // Read + write return } // Mount side-effect atoms without reading value function App() { useAtomMount(keyboardShortcutsAtom) return <>{children} } ``` ### Handling Results with Result.builder **Use `Result.builder`** for rendering effectful atom results. It provides chainable error handling with `onErrorTag`: ```typescript import { Result } from "@effect-atom/atom-react" function UserProfile() { const userResult = useAtomValue(userAtom) // Result return Result.builder(userResult) .onInitial(() =>
<div>Loading...</div>
) .onErrorTag("NotFoundError", () =>
<div>User not found</div>
) .onError((error) =>
<div>Error: {error.message}</div>
) .onSuccess((user) =>
<div>Hello, {user.name}</div>
) .render() } ``` ### Atoms with Side Effects ```typescript const scrollYAtom = Atom.make((get) => { const onScroll = () => get.setSelf(window.scrollY) window.addEventListener("scroll", onScroll) get.addFinalizer(() => window.removeEventListener("scroll", onScroll)) // REQUIRED return window.scrollY }).pipe(Atom.keepAlive) ``` See `references/effect-atom-patterns.md` for complete patterns including families, localStorage, and anti-patterns. ## RPC & Cluster Patterns For RPC contracts and cluster workflows, see: - `references/rpc-cluster-patterns.md` - RpcGroup, Workflow.make, Activity patterns ## Anti-Patterns (Forbidden) These patterns are **never acceptable**: ```typescript // FORBIDDEN - runSync/runPromise inside services const result = Effect.runSync(someEffect) // Never do this // FORBIDDEN - throw inside Effect.gen yield* Effect.gen(function* () { if (bad) throw new Error("No!") // Use Effect.fail instead }) // FORBIDDEN - catchAll losing type info yield* effect.pipe(Effect.catchAll(() => Effect.fail(new GenericError()))) // FORBIDDEN - console.log console.log("debug") // Use Effect.log // FORBIDDEN - process.env directly const key = process.env.API_KEY // Use Config.string("API_KEY") // FORBIDDEN - null/undefined in domain types type User = { name: string | null } // Use Option ``` See `references/anti-patterns.md` for the complete list with rationale. ## Observability ```typescript // Structured logging yield* Effect.log("Processing order", { orderId, userId, amount }) // Metrics const orderCounter = Metric.counter("orders_processed") yield* Metric.increment(orderCounter) // Config with validation const config = Config.all({ port: Config.integer("PORT").pipe(Config.withDefault(3000)), apiKey: Config.secret("API_KEY"), maxRetries: Config.integer("MAX_RETRIES").pipe( Config.validate({ message: "Must be positive", validation: (n) => n > 0 }) ), }) ``` See `references/observability-patterns.md` for metrics and tracing patterns. 
## Reference Files For detailed patterns, consult these reference files in the `references/` directory: - `service-patterns.md` - Service definition, Effect.fn, Context.Tag exceptions - `error-patterns.md` - Schema.TaggedError, error remapping, retry patterns - `schema-patterns.md` - Branded types, transforms, Schema.Class - `layer-patterns.md` - Dependency composition, testing layers - `rpc-cluster-patterns.md` - RpcGroup, Workflow, Activity patterns - `effect-atom-patterns.md` - Atom, families, React hooks, Result handling - `anti-patterns.md` - Complete list of forbidden patterns - `observability-patterns.md` - Logging, metrics, config patterns ================================================ FILE: agents/skills/effect-best-practices/references/anti-patterns.md ================================================ # Anti-Patterns (Forbidden) These patterns are **never acceptable** in Effect-TS code. Each is listed with rationale and the correct alternative. ## FORBIDDEN: Effect.runSync/runPromise Inside Services ```typescript // FORBIDDEN export class UserService extends Effect.Service()("UserService", { effect: Effect.gen(function* () { const findById = (id: UserId) => { // Running effects synchronously breaks composition const user = Effect.runSync(repo.findById(id)) return user } return { findById } }), }) {} ``` **Why:** Breaks Effect's composition model, loses error handling, can't be tested, loses tracing. **Correct:** ```typescript const findById = Effect.fn("UserService.findById")(function* (id: UserId) { return yield* repo.findById(id) }) ``` ## FORBIDDEN: throw Inside Effect.gen ```typescript // FORBIDDEN yield* Effect.gen(function* () { const user = yield* repo.findById(id) if (!user) { throw new Error("User not found") // Bypasses Effect error channel } return user }) ``` **Why:** Throws bypass Effect's error channel, can't be caught with `catchTag`, breaks type safety. 
**Correct:** ```typescript yield* Effect.gen(function* () { const user = yield* repo.findById(id) if (!user) { return yield* Effect.fail(new UserNotFoundError({ userId: id, message: "Not found" })) } return user }) ``` ## FORBIDDEN: catchAll Losing Type Information ```typescript // FORBIDDEN yield* someEffect.pipe( Effect.catchAll((err) => Effect.fail(new GenericError({ message: "Something failed" })) ) ) ``` **Why:** Loses specific error information, makes debugging harder, prevents specific error handling downstream. **Correct:** ```typescript yield* someEffect.pipe( Effect.catchTags({ DatabaseError: (err) => Effect.fail(new ServiceUnavailableError({ message: err.message })), ValidationError: (err) => Effect.fail(new BadRequestError({ message: err.message })), }), ) ``` ## FORBIDDEN: any/unknown Casts ```typescript // FORBIDDEN const data = someValue as any const result = (await fetch(url)) as unknown as MyType ``` **Why:** Completely bypasses type safety, can cause runtime errors, loses Effect's type guarantees. **Correct:** ```typescript // Use Schema for parsing unknown data const result = yield* Schema.decodeUnknown(MyType)(someValue) // Or explicit type guards if (isMyType(someValue)) { // Now safely typed } ``` ## FORBIDDEN: Promise in Service Signatures ```typescript // FORBIDDEN export class UserService extends Effect.Service()("UserService", { effect: Effect.gen(function* () { return { findById: async (id: UserId): Promise => { // Using Promise instead of Effect } } }), }) {} ``` **Why:** Loses Effect's error handling, can't compose with other Effects, loses tracing/metrics. **Correct:** ```typescript const findById = Effect.fn("UserService.findById")( function* (id: UserId): Effect.Effect { // ... } ) ``` ## FORBIDDEN: console.log ```typescript // FORBIDDEN console.log("Processing order:", orderId) console.error("Error:", error) ``` **Why:** Not structured, not captured by Effect's logging system, lost in production telemetry. 
**Correct:** ```typescript yield* Effect.log("Processing order", { orderId }) yield* Effect.logError("Operation failed", { error: String(error) }) ``` ## FORBIDDEN: process.env Directly ```typescript // FORBIDDEN const apiKey = process.env.API_KEY const port = parseInt(process.env.PORT || "3000") ``` **Why:** No validation, no type safety, fails silently if missing, hard to test. **Correct:** ```typescript const config = yield* Config.all({ apiKey: Config.secret("API_KEY"), port: Config.integer("PORT").pipe(Config.withDefault(3000)), }) ``` ## FORBIDDEN: null/undefined in Domain Types ```typescript // FORBIDDEN type User = { name: string bio: string | null avatar: string | undefined } ``` **Why:** Null/undefined handling is error-prone, loses the explicit "absence" semantics. **Correct:** ```typescript const User = Schema.Struct({ name: Schema.String, bio: Schema.Option(Schema.String), avatar: Schema.Option(Schema.String), }) ``` ## FORBIDDEN: Option.getOrThrow ```typescript // FORBIDDEN const user = Option.getOrThrow(maybeUser) const name = pipe(maybeName, Option.getOrThrow) ``` **Why:** Throws exceptions, bypasses Effect's error handling, fails at runtime instead of compile time. **Correct:** ```typescript // Handle both cases explicitly yield* Option.match(maybeUser, { onNone: () => Effect.fail(new UserNotFoundError({ userId, message: "Not found" })), onSome: Effect.succeed, }) // Or provide a default const name = Option.getOrElse(maybeName, () => "Anonymous") // Or use Option.map for transformations const upperName = Option.map(maybeName, (n) => n.toUpperCase()) ``` ## FORBIDDEN: Context.Tag for Business Services ```typescript // FORBIDDEN export class UserService extends Context.Tag("UserService")< UserService, { findById: (id: UserId) => Effect.Effect } >() { static Default = Layer.effect(this, Effect.gen(function* () { ... })) } ``` **Why:** Requires manual layer creation, no built-in accessors, more boilerplate. 
**Correct:** ```typescript export class UserService extends Effect.Service()("UserService", { accessors: true, dependencies: [...], effect: Effect.gen(function* () { ... }), }) {} ``` ## FORBIDDEN: Ignoring Errors with orDie ```typescript // FORBIDDEN (in most cases) yield* someEffect.pipe(Effect.orDie) ``` **Why:** Converts recoverable errors to defects (unrecoverable), loses error information. **Acceptable exceptions:** - Truly unrecoverable situations (invalid program state) - After exhausting all recovery options - In test setup code **Correct:** ```typescript // Handle errors explicitly yield* someEffect.pipe( Effect.catchTag("RecoverableError", (err) => Effect.fail(new DomainError({ message: err.message })) ), ) ``` ## FORBIDDEN: mapError Instead of catchTag ```typescript // FORBIDDEN yield* effect.pipe( Effect.mapError((err) => new GenericError({ message: String(err) })) ) ``` **Why:** Loses error type information, can't discriminate between error types. **Correct:** ```typescript yield* effect.pipe( Effect.catchTag("SpecificError", (err) => Effect.fail(new MappedError({ message: err.message })) ), ) ``` ## FORBIDDEN: Mixing Effect and Promise Chains ```typescript // FORBIDDEN const result = await someEffect.pipe( Effect.runPromise, ).then(data => { // Mixing Promise chain with Effect return Effect.runPromise(anotherEffect(data)) }) ``` **Why:** Loses Effect composition benefits, error handling becomes inconsistent. **Correct:** ```typescript const program = Effect.gen(function* () { const data = yield* someEffect return yield* anotherEffect(data) }) const result = await Effect.runPromise(program) ``` ## FORBIDDEN: Mutable State Without Ref ```typescript // FORBIDDEN let counter = 0 const increment = Effect.sync(() => { counter++ }) ``` **Why:** Race conditions, not testable, not composable, breaks referential transparency. 
**Correct:** ```typescript const program = Effect.gen(function* () { const counter = yield* Ref.make(0) yield* Ref.update(counter, (n) => n + 1) return yield* Ref.get(counter) }) ``` ## FORBIDDEN: Using Date.now() or new Date() Directly ```typescript // FORBIDDEN const now = new Date() const timestamp = Date.now() ``` **Why:** Not testable, introduces non-determinism, hard to mock in tests. **Correct:** ```typescript import { Clock } from "effect" const now = yield* Clock.currentTimeMillis const date = yield* Clock.currentTimeZone.pipe( Effect.map((tz) => new Date()) ) ``` ================================================ FILE: agents/skills/effect-best-practices/references/effect-atom-patterns.md ================================================ # Effect Atom Patterns Effect Atom is a reactive state management library that integrates with Effect-TS. It provides atoms (reactive containers), automatic dependency tracking, and seamless React integration. ## Core Concepts - **Atoms**: Reactive state containers with automatic dependency tracking - **Result**: Handles async/effectful computations with initial, success, and failure states - **Finalizers**: Built-in cleanup for resources and event listeners - **Families**: Dynamic atom creation for per-entity state ## Creating Atoms ### Basic Atoms ```typescript import { Atom } from "@effect-atom/atom-react" // Simple value atom const countAtom = Atom.make(0) // With keepAlive - persists when no components subscribe const persistentCountAtom = Atom.make(0).pipe(Atom.keepAlive) ``` **Rule:** Use `Atom.keepAlive` for global state that should persist across component unmounts. 
### Derived Atoms ```typescript const countAtom = Atom.make(0) // Derived using get function const doubleCountAtom = Atom.make((get) => get(countAtom) * 2) // Derived using Atom.map const tripleCountAtom = Atom.map(countAtom, (count) => count * 3) ``` ### Atoms with Side Effects ```typescript // Track window scroll position const scrollYAtom = Atom.make((get) => { const onScroll = () => get.setSelf(window.scrollY) window.addEventListener("scroll", onScroll) get.addFinalizer(() => window.removeEventListener("scroll", onScroll)) return window.scrollY }).pipe(Atom.keepAlive) ``` **Critical:** - Use `get.setSelf` to update the atom's own value - Always add finalizers with `get.addFinalizer()` to clean up side effects - Finalizers run when the atom is rebuilt or disposed ### Atom.transform for Self-Updating Derived State ```typescript const resolvedThemeAtom = Atom.transform(themeAtom, (get) => { const theme = get(themeAtom) if (theme !== "system") return theme const matcher = window.matchMedia("(prefers-color-scheme: dark)") const onChange = () => get.setSelf(matcher.matches ? "dark" : "light") matcher.addEventListener("change", onChange) get.addFinalizer(() => matcher.removeEventListener("change", onChange)) return matcher.matches ? 
"dark" : "light" }) ``` ## Atom Families Use `Atom.family` for per-entity state: ```typescript import { Atom } from "@effect-atom/atom-react" // Create a family of atoms - one per channelId const replyToMessageAtomFamily = Atom.family((channelId: string) => Atom.make(null).pipe(Atom.keepAlive) ) // Modal state family type ModalType = "settings" | "confirm" | "create" interface ModalState { type: ModalType isOpen: boolean metadata?: Record } const modalAtomFamily = Atom.family((type: ModalType) => Atom.make({ type, isOpen: false, metadata: undefined, }).pipe(Atom.keepAlive) ) ``` **Use families for:** - Per-resource state (users, channels, documents) - Modal instances - Form state per entity - Any parameterized state ## React Integration ### Reading Atom Values ```typescript import { useAtomValue } from "@effect-atom/atom-react" function Counter() { const count = useAtomValue(countAtom) return {count} } ``` ### Updating Atom Values ```typescript import { useAtomSet } from "@effect-atom/atom-react" function IncrementButton() { const setCount = useAtomSet(countAtom) return ( ) } ``` ### Reading and Writing Together ```typescript import { useAtom } from "@effect-atom/atom-react" function CounterControl() { const [count, setCount] = useAtom(countAtom) return (
{count}
) } ``` ### Mounting Side-Effect Atoms Use `useAtomMount` to activate atoms without reading their value: ```typescript import { useAtomMount } from "@effect-atom/atom-react" function App() { // Activate side effects without subscribing to value useAtomMount(keyboardShortcutsAtom) useAtomMount(presenceTrackingAtom) useAtomMount(themeApplierAtom) return <>{children}</> } ``` ## Working with Effects and Results ### Effectful Atoms Return Result ```typescript import { Atom, Result } from "@effect-atom/atom-react" import { Effect } from "effect" const userAtom = Atom.make( Effect.gen(function* () { const response = yield* fetchUser() return response }) ) // Type: Atom<Result<User, Error>> ``` ### Handling Results with Result.builder (Recommended) **Use `Result.builder`** for rendering Result types. It provides a chainable API with granular error handling and type narrowing. ```typescript import { Result, useAtomValue } from "@effect-atom/atom-react" function UserProfile() { const userResult = useAtomValue(userAtom) return Result.builder(userResult) .onInitial(() =>
<div>Loading...</div>
) .onError((error) =>
<div>Error: {error.message}</div>
) .onSuccess((user) =>
<div>Hello, {user.name}!</div>
) .render() } ``` ### Result.builder with Tagged Errors **Key advantage**: Handle specific error types with `onErrorTag`: ```typescript function ResourceEmbed({ url }: { url: string }) { const resourceResult = useAtomValue(resourceAtom) return Result.builder(resourceResult) .onInitial(() => ) .onErrorTag("NotFoundError", (error) => ( )) .onErrorTag("UnauthorizedError", () => ( )) .onErrorTag("RateLimitError", (error) => ( )) .onError((error) => ( // Fallback for any other errors )) .onSuccess((data) => ) .render() } ``` ### Result.builder Methods | Method | Purpose | |--------|---------| | `onInitial(fn)` | Handle initial/loading state | | `onInitialOrWaiting(fn)` | Handle both initial and waiting states | | `onWaiting(fn)` | Handle waiting/refetching state | | `onSuccess(fn)` | Handle success with value | | `onError(fn)` | Handle any error | | `onErrorTag(tag, fn)` | Handle specific tagged error (removes from type) | | `onErrorIf(predicate, fn)` | Handle errors matching predicate | | `onFailure(fn)` | Handle failure with full Cause | | `onDefect(fn)` | Handle unexpected defects | | `render()` | Return result (null if unhandled initial) | | `orElse(fn)` | Provide fallback value | | `orNull()` | Return null for unhandled cases | ### Extracting Values with orElse For non-rendering use cases, extract values with `orElse`: ```typescript function useRepositories() { const reposResult = useAtomValue(repositoriesAtom) // Extract array or empty fallback const repositories = Result.builder(reposResult) .onSuccess((data) => data.repositories) .orElse(() => []) return repositories } ``` ### Result.getOrElse for Simple Extraction For simple value extraction without error handling: ```typescript function UserName() { const userResult = useAtomValue(userAtom) const user = Result.getOrElse(userResult, () => null) if (!user) return Loading... 
return {user.name} } ``` ### When to Use Each Pattern | Pattern | Use Case | |---------|----------| | `Result.builder` | UI rendering with multiple error types | | `Result.builder + onErrorTag` | APIs with tagged errors (HttpApi, RPC) | | `Result.builder + orElse` | Extracting values with fallback | | `Result.getOrElse` | Simple value extraction | | `Result.match` | Simple 3-case exhaustive matching | ### Accessing Results in Derived Atoms ```typescript const userProfileAtom = Atom.make( Effect.fnUntraced(function* (get: Atom.Context) { // Unwrap Result to get the value (waits for success) const user = yield* get.result(userAtom) const posts = yield* fetchUserPosts(user.id) return { user, posts } }) ) ``` ## Batching Updates Use `Atom.batch` for multiple updates: ```typescript const openModal = (type: ModalType, metadata?: Record) => { Atom.batch(() => { Atom.update(modalAtomFamily(type), (state) => ({ ...state, isOpen: true, metadata, })) }) } ``` ## localStorage Persistence ```typescript import { BrowserKeyValueStore } from "@effect/platform-browser" import { Atom } from "@effect-atom/atom-react" import { Schema } from "effect" // Create runtime with localStorage const localStorageRuntime = Atom.runtime(BrowserKeyValueStore.layerLocalStorage) // Persisted atom with schema validation const themeAtom = Atom.kvs({ runtime: localStorageRuntime, key: "app-theme", schema: Schema.Literal("dark", "light", "system"), defaultValue: () => "system" as const, }) ``` ## Anti-Patterns ### FORBIDDEN: Creating Atoms Inside Components ```typescript // WRONG - creates new atom on every render function Counter() { const countAtom = Atom.make(0) // New atom each render! const count = useAtomValue(countAtom) return
<div>{count}</div>
} // CORRECT - define atoms outside components const countAtom = Atom.make(0) function Counter() { const count = useAtomValue(countAtom) return
<div>{count}</div>
} ``` ### FORBIDDEN: Imperative Updates from React Components ```typescript // WRONG - doesn't trigger React re-renders export const openModal = (type: string) => { Atom.batch(() => { Atom.update(modalAtomFamily(type), (s) => ({ ...s, isOpen: true })) }) } function Component() { return } // CORRECT - use hooks for React integration export const useModal = (type: string) => { const state = useAtomValue(modalAtomFamily(type)) const setState = useAtomSet(modalAtomFamily(type)) const open = useCallback(() => { setState((prev) => ({ ...prev, isOpen: true })) }, [setState]) const close = useCallback(() => { setState((prev) => ({ ...prev, isOpen: false })) }, [setState]) return { isOpen: state.isOpen, open, close } } ``` **When imperative updates ARE acceptable:** - Event listeners outside React (keyboard shortcuts) - Effects running on atom changes - Non-UI state (analytics, logging) ### FORBIDDEN: Missing Finalizers ```typescript // WRONG - memory leak! const scrollAtom = Atom.make((get) => { const onScroll = () => get.setSelf(window.scrollY) window.addEventListener("scroll", onScroll) return window.scrollY }) // CORRECT - cleanup registered const scrollAtom = Atom.make((get) => { const onScroll = () => get.setSelf(window.scrollY) window.addEventListener("scroll", onScroll) get.addFinalizer(() => window.removeEventListener("scroll", onScroll)) return window.scrollY }) ``` ### FORBIDDEN: Missing keepAlive for Global State ```typescript // WRONG - state resets when component unmounts export const modalStateAtom = Atom.make({ isOpen: false }) // CORRECT - state persists export const modalStateAtom = Atom.make({ isOpen: false }).pipe(Atom.keepAlive) ``` ### FORBIDDEN: Ignoring Result Types ```typescript // WRONG - doesn't handle loading/error states const userResult = useAtomValue(userAtom) return
<div>Hello, {userResult.name}</div>
// Type error! // CORRECT - use Result.builder to handle all states const userResult = useAtomValue(userAtom) return Result.builder(userResult) .onInitial(() =>
<div>Loading...</div>
) .onError((error) =>
<div>Error: {error.message}</div>
) .onSuccess((user) =>
<div>Hello, {user.name}</div>
) .render() ``` ### FORBIDDEN: Updating State During Render ```typescript // WRONG - side effect during render function Component() { const count = useAtomValue(countAtom) Atom.set(countAtom, count + 1) // Never do this! return
<div>{count}</div>
} // CORRECT - use effects or event handlers function Component() { const count = useAtomValue(countAtom) const setCount = useAtomSet(countAtom) useEffect(() => { setCount((c) => c + 1) }, []) return
<div>{count}</div>
} ``` ## Performance Tips ### Selective Re-rendering ```typescript // WRONG - subscribes to entire state const state = useAtomValue(appStateAtom) const userName = state.user.name // CORRECT - derive focused atom const userNameAtom = Atom.map(appStateAtom, (state) => state.user.name) const userName = useAtomValue(userNameAtom) ``` ### When to Use keepAlive Use `Atom.keepAlive` for: - Global application state - Modal/dialog state - User preferences - Authentication state - Frequently accessed derived state Skip `keepAlive` for: - Component-local state that should reset - Temporary form state - State tied to component lifecycle ================================================ FILE: agents/skills/effect-best-practices/references/error-patterns.md ================================================ # Error Patterns ## Why Explicit Error Types? Generic errors like `BadRequestError` or `NotFoundError` seem convenient but create problems: | Generic Error | Problems | |--------------|----------| | `NotFoundError` | Which resource? How should frontend recover? | | `BadRequestError` | What's invalid? Can user fix it? | | `UnauthorizedError` | Session expired? Wrong credentials? Missing permission? | | `InternalServerError` | Retryable? User action needed? | **Explicit errors enable:** 1. **Specific UI messages** - "Your session expired" vs generic "Unauthorized" 2. **Targeted recovery** - Refresh token vs show login page 3. **Better observability** - Group errors by specific type in dashboards 4. 
**Type-safe handling** - `catchTag("SessionExpiredError")` vs generic catch ### Anti-Pattern: Generic Error Mapping ```typescript // ❌ WRONG - Collapsing to generic HTTP errors export class NotFoundError extends Schema.TaggedError()( "NotFoundError", { message: Schema.String }, HttpApiSchema.annotations({ status: 404 }), ) {} // At API boundaries: Effect.catchTags({ UserNotFoundError: (err) => Effect.fail(new NotFoundError({ message: "Not found" })), ChannelNotFoundError: (err) => Effect.fail(new NotFoundError({ message: "Not found" })), MessageNotFoundError: (err) => Effect.fail(new NotFoundError({ message: "Not found" })), }) // Frontend receives: { _tag: "NotFoundError", message: "Not found" } // - Can't show specific message ("User doesn't exist" vs "Channel was deleted") // - Can't take specific action (redirect to user search vs channel list) // - Debugging is harder (which resource was missing?) ``` ```typescript // ✅ CORRECT - Keep explicit errors all the way to frontend export class UserNotFoundError extends Schema.TaggedError()( "UserNotFoundError", { userId: UserId, message: Schema.String }, HttpApiSchema.annotations({ status: 404 }), ) {} export class ChannelNotFoundError extends Schema.TaggedError()( "ChannelNotFoundError", { channelId: ChannelId, message: Schema.String }, HttpApiSchema.annotations({ status: 404 }), ) {} // Frontend can handle each case: Result.builder(result) .onErrorTag("UserNotFoundError", (err) => ) .onErrorTag("ChannelNotFoundError", (err) => ) .onErrorTag("SessionExpiredError", () => ) .render() ``` ## Error Naming Conventions | Pattern | Example | Use For | |---------|---------|---------| | `{Entity}NotFoundError` | `UserNotFoundError`, `ChannelNotFoundError` | Resource lookups | | `{Entity}{Action}Error` | `UserCreateError`, `MessageUpdateError` | Mutations that fail | | `{Feature}Error` | `SessionExpiredError`, `RateLimitExceededError` | Feature-specific failures | | `{Integration}Error` | `WorkOSUserFetchError`, 
`StripePaymentError` | External service errors | | `Invalid{Field}Error` | `InvalidEmailError`, `InvalidPasswordError` | Validation failures | ### Rich Error Context Include context fields that help with debugging and UI handling: ```typescript // Entity errors → include entity ID export class UserNotFoundError extends Schema.TaggedError()( "UserNotFoundError", { userId: UserId, // Which user? message: Schema.String, }, HttpApiSchema.annotations({ status: 404 }), ) {} // Action errors → include input that failed export class UserCreateError extends Schema.TaggedError()( "UserCreateError", { email: Schema.String, // What email failed? reason: Schema.String, // Why? "duplicate", "invalid domain" message: Schema.String, }, HttpApiSchema.annotations({ status: 400 }), ) {} // Integration errors → include service name and retryable flag export class StripePaymentError extends Schema.TaggedError()( "StripePaymentError", { stripeErrorCode: Schema.String, retryable: Schema.Boolean, message: Schema.String, }, HttpApiSchema.annotations({ status: 402 }), ) {} // Auth errors → include expiry info export class SessionExpiredError extends Schema.TaggedError()( "SessionExpiredError", { sessionId: SessionId, expiredAt: Schema.DateTimeUtc, message: Schema.String, }, HttpApiSchema.annotations({ status: 401 }), ) {} ``` ## Schema.TaggedError for All Errors **Always use `Schema.TaggedError`** for defining errors. This provides: 1. **Serialization** - Errors can be sent over RPC/network 2. **Type safety** - `_tag` discriminator enables `catchTag` 3. **Consistent structure** - All errors have predictable shape 4. 
**HTTP status mapping** - Via `HttpApiSchema.annotations` ### Basic Error Definition ```typescript import { Schema } from "effect" import { HttpApiSchema } from "@effect/platform" export class UserNotFoundError extends Schema.TaggedError<UserNotFoundError>()( "UserNotFoundError", { userId: UserId, message: Schema.String, }, HttpApiSchema.annotations({ status: 404 }), ) {} export class UserCreateError extends Schema.TaggedError<UserCreateError>()( "UserCreateError", { message: Schema.String, cause: Schema.optional(Schema.String), }, HttpApiSchema.annotations({ status: 400 }), ) {} export class UnauthorizedError extends Schema.TaggedError<UnauthorizedError>()( "UnauthorizedError", { message: Schema.String, }, HttpApiSchema.annotations({ status: 401 }), ) {} export class ForbiddenError extends Schema.TaggedError<ForbiddenError>()( "ForbiddenError", { message: Schema.String, requiredPermission: Schema.optional(Schema.String), }, HttpApiSchema.annotations({ status: 403 }), ) {} ``` ### Required Fields Every error should have: - `message: Schema.String` - Human-readable description - Relevant context fields (IDs, etc.) - Optional `cause: Schema.optional(Schema.String)` for error chains ## Error Handling with catchTag/catchTags **Never use `catchAll` or `mapError`** when you can use `catchTag`/`catchTags`. These preserve type information and enable precise error handling. 
### catchTag for Single Error Types ```typescript const findUser = Effect.fn("UserService.findUser")(function* (id: UserId) { return yield* repo.findById(id).pipe( Effect.catchTag("DatabaseError", (err) => Effect.fail(new UserNotFoundError({ userId: id, message: `Database lookup failed: ${err.message}`, })) ), ) }) ``` ### catchTags for Multiple Error Types ```typescript const processOrder = Effect.fn("OrderService.processOrder")(function* (input: OrderInput) { return yield* validateAndProcess(input).pipe( Effect.catchTags({ ValidationError: (err) => Effect.fail(new OrderValidationError({ message: err.message, field: err.field, })), PaymentError: (err) => Effect.fail(new OrderPaymentError({ message: `Payment failed: ${err.message}`, code: err.code, })), InventoryError: (err) => Effect.fail(new OrderInventoryError({ productId: err.productId, message: "Insufficient inventory", })), }), ) }) ``` ### Why Not catchAll? ```typescript // WRONG - Loses type information yield* effect.pipe( Effect.catchAll((err) => Effect.fail(new InternalServerError({ message: "Something failed" })) ) ) // Problems: // 1. Can't distinguish error types downstream // 2. Hides useful error context // 3. Makes debugging harder // 4. 
Frontend can't show specific messages ``` ## Error Remapping Pattern Create reusable error remapping functions for common transformations: ```typescript import { Effect } from "effect" export const withRemapDbErrors = ( effect: Effect.Effect, context: { entityType: string; entityId: string } ): Effect.Effect => effect.pipe( Effect.catchTag("DatabaseError", (err) => Effect.fail(new EntityNotFoundError({ entityType: context.entityType, entityId: context.entityId, message: `${context.entityType} not found`, })) ), Effect.catchTag("ConnectionError", (err) => Effect.fail(new ServiceUnavailableError({ message: "Database connection unavailable", cause: err.message, })) ), ) // Usage const findUser = Effect.fn("UserService.findUser")(function* (id: UserId) { return yield* repo.findById(id).pipe( withRemapDbErrors({ entityType: "User", entityId: id }) ) }) ``` ## Retryable Errors Pattern For errors that may be transient, add a `retryable` property: ```typescript export class ServiceUnavailableError extends Schema.TaggedError()( "ServiceUnavailableError", { message: Schema.String, cause: Schema.optional(Schema.String), retryable: Schema.optionalWith(Schema.Boolean, { default: () => true }), }, HttpApiSchema.annotations({ status: 503 }), ) {} export class RateLimitError extends Schema.TaggedError()( "RateLimitError", { message: Schema.String, retryAfter: Schema.optional(Schema.Number), retryable: Schema.optionalWith(Schema.Boolean, { default: () => true }), }, HttpApiSchema.annotations({ status: 429 }), ) {} // Non-retryable error export class ValidationError extends Schema.TaggedError()( "ValidationError", { message: Schema.String, field: Schema.String, retryable: Schema.optionalWith(Schema.Boolean, { default: () => false }), }, HttpApiSchema.annotations({ status: 400 }), ) {} ``` ### Retry Based on Error Property ```typescript import { Effect, Schedule } from "effect" const withRetry = ( effect: Effect.Effect ): Effect.Effect => effect.pipe( Effect.retry( 
Schedule.exponential("100 millis").pipe( Schedule.intersect(Schedule.recurs(3)), Schedule.whileInput((err: E) => err.retryable === true), ) ), ) // Usage yield* callExternalApi(request).pipe(withRetry) ``` ## Error Unions for Activities When defining workflow activities, use explicit error unions: ```typescript // Activity error type - union of possible errors export type GetChannelMembersError = | DatabaseError | ChannelNotFoundError export class DatabaseError extends Schema.TaggedError()( "DatabaseError", { message: Schema.String, cause: Schema.optional(Schema.String), retryable: Schema.optionalWith(Schema.Boolean, { default: () => true }), }, ) {} export class ChannelNotFoundError extends Schema.TaggedError()( "ChannelNotFoundError", { channelId: ChannelId, message: Schema.String, retryable: Schema.optionalWith(Schema.Boolean, { default: () => false }), }, ) {} // In activity definition yield* Activity.make({ name: "GetChannelMembers", success: ChannelMembersResult, error: Schema.Union(DatabaseError, ChannelNotFoundError), execute: Effect.gen(function* () { // ... }), }) ``` ## HTTP Status Codes (Without Generic Errors) **Map HTTP status codes at the error level, not by creating generic error classes.** Each explicit error can have its own HTTP status. 
```typescript // ✅ CORRECT - Domain errors with HTTP status annotations export class UserNotFoundError extends Schema.TaggedError()( "UserNotFoundError", { userId: UserId, message: Schema.String }, HttpApiSchema.annotations({ status: 404 }), // Status on specific error ) {} export class ChannelNotFoundError extends Schema.TaggedError()( "ChannelNotFoundError", { channelId: ChannelId, message: Schema.String }, HttpApiSchema.annotations({ status: 404 }), // Same status, different error ) {} export class SessionExpiredError extends Schema.TaggedError()( "SessionExpiredError", { sessionId: SessionId, expiredAt: Schema.DateTimeUtc, message: Schema.String }, HttpApiSchema.annotations({ status: 401 }), ) {} export class InvalidCredentialsError extends Schema.TaggedError()( "InvalidCredentialsError", { message: Schema.String }, HttpApiSchema.annotations({ status: 401 }), // Same status, different meaning ) {} ``` ```typescript // ❌ WRONG - Generic HTTP error classes export class UnauthorizedError extends Schema.TaggedError()( "UnauthorizedError", { message: Schema.String }, HttpApiSchema.annotations({ status: 401 }), ) {} // Then mapping everything to it - loses critical information! 
Effect.catchTags({ SessionExpiredError: (err) => Effect.fail(new UnauthorizedError({ message: "Unauthorized" })), InvalidCredentialsError: (err) => Effect.fail(new UnauthorizedError({ message: "Unauthorized" })), MissingTokenError: (err) => Effect.fail(new UnauthorizedError({ message: "Unauthorized" })), }) // Frontend can't distinguish: expired session vs wrong password vs missing token ``` ### When Generic Errors Are Acceptable Generic errors are only acceptable for **truly unrecoverable internal errors** where: - The frontend can only show "Something went wrong" - No user action can fix it - You're hiding internal details for security ```typescript // Acceptable for unrecoverable errors export class InternalServerError extends Schema.TaggedError()( "InternalServerError", { message: Schema.String, requestId: Schema.optional(Schema.String) }, HttpApiSchema.annotations({ status: 500 }), ) {} // Use sparingly - only for truly unexpected errors Effect.catchAll((unexpectedError) => Effect.fail(new InternalServerError({ message: "An unexpected error occurred", requestId: context.requestId, })) ) ``` ## Error Logging Log errors with structured context: ```typescript const processWithLogging = Effect.fn("OrderService.process")(function* (orderId: OrderId) { return yield* processOrder(orderId).pipe( Effect.tapError((err) => Effect.log("Order processing failed", { orderId, errorTag: err._tag, errorMessage: err.message, }) ), ) }) ``` ================================================ FILE: agents/skills/effect-best-practices/references/layer-patterns.md ================================================ # Layer Patterns ## Dependencies in Effect.Service **Critical rule:** Always declare dependencies in the `dependencies` array of `Effect.Service`. This ensures proper composition and avoids "leaked dependencies" that require manual wiring at usage sites. 
### Correct Pattern ```typescript export class OrderService extends Effect.Service()("OrderService", { accessors: true, dependencies: [ UserService.Default, ProductService.Default, InventoryService.Default, PaymentService.Default, ], effect: Effect.gen(function* () { const users = yield* UserService const products = yield* ProductService const inventory = yield* InventoryService const payments = yield* PaymentService // Service implementation... return { /* methods */ } }), }) {} // At app root - simple, flat composition const AppLive = Layer.mergeAll( OrderService.Default, // Other top-level services NotificationService.Default, AnalyticsService.Default, ) ``` ### Wrong Pattern (Leaked Dependencies) ```typescript // WRONG - Dependencies not declared export class OrderService extends Effect.Service()("OrderService", { accessors: true, effect: Effect.gen(function* () { const users = yield* UserService // Not in dependencies! // ... }), }) {} // Now every usage requires manual wiring const program = OrderService.create(input).pipe( Effect.provide( OrderService.Default.pipe( Layer.provide(UserService.Default), Layer.provide(ProductService.Default), // Easy to forget one, causes runtime errors ) ), ) ``` ## Infrastructure Layers Infrastructure layers (Database, Redis, HTTP clients) are **acceptable** to leave as "leaked" dependencies because: 1. They're provided once at the application root 2. They don't change between test/production (different implementations, same interface) 3. 
They're true infrastructure, not business logic ```typescript // Infrastructure can be provided at app root import { PgClient } from "@effect/sql-pg" const DatabaseLive = PgClient.layer({ host: Config.string("DB_HOST"), port: Config.integer("DB_PORT"), database: Config.string("DB_NAME"), username: Config.string("DB_USER"), password: Config.secret("DB_PASSWORD"), }) // Services use database but don't declare it in dependencies export class UserRepo extends Effect.Service()("UserRepo", { accessors: true, // No dependencies array - PgClient provided at app root effect: Effect.gen(function* () { const sql = yield* PgClient.PgClient const findById = Effect.fn("UserRepo.findById")(function* (id: UserId) { const rows = yield* sql`SELECT * FROM users WHERE id = ${id}`.pipe(Effect.orDie) return rows[0] as User | undefined }) return { findById } }), }) {} // App root provides infrastructure once const AppLive = Layer.mergeAll( OrderService.Default, UserService.Default, ).pipe( Layer.provide(DatabaseLive), // Infrastructure provided here Layer.provide(RedisLive), ) ``` ## Layer.mergeAll Over Nested Provides **Use `Layer.mergeAll`** for composing layers at the same level: ```typescript // CORRECT - Flat composition const ServicesLive = Layer.mergeAll( UserService.Default, OrderService.Default, ProductService.Default, NotificationService.Default, ) const InfrastructureLive = Layer.mergeAll( DatabaseLive, RedisLive, HttpClientLive, ) const AppLive = ServicesLive.pipe( Layer.provide(InfrastructureLive), ) ``` ```typescript // WRONG - Deeply nested, hard to read const AppLive = UserService.Default.pipe( Layer.provide( OrderService.Default.pipe( Layer.provide( ProductService.Default.pipe( Layer.provide(DatabaseLive), ), ), ), ), ) ``` ## Layer Naming Conventions Use suffixes to indicate layer type: - `ServiceLive` - Production implementation - `ServiceTest` - Test/mock implementation - `ServiceLayer` - Generic layer (rare) ```typescript // Production export const UserServiceLive = 
UserService.Default // Test with mocks export const UserServiceTest = Layer.succeed( UserService, UserService.of({ findById: (id) => Effect.succeed(mockUser), create: (input) => Effect.succeed({ id: UserId.make("test-id"), ...input }), }) ) // Test with in-memory state export class UserServiceInMemory extends Effect.Service()("UserService", { accessors: true, effect: Effect.gen(function* () { const store = new Map() return { findById: Effect.fn("UserService.findById")(function* (id) { const user = store.get(id) if (!user) return yield* Effect.fail(new UserNotFoundError({ userId: id })) return user }), create: Effect.fn("UserService.create")(function* (input) { const user = { id: UserId.make(crypto.randomUUID()), ...input } store.set(user.id, user) return user }), } }), }) {} ``` ## Layer.unwrapEffect for Config-Dependent Layers When a layer needs async configuration: ```typescript import { Config, Effect, Layer } from "effect" // Layer that depends on config const ApiClientLive = Layer.unwrapEffect( Effect.gen(function* () { const apiKey = yield* Config.string("API_KEY") const baseUrl = yield* Config.string("API_BASE_URL") const timeout = yield* Config.integer("API_TIMEOUT").pipe( Config.withDefault(5000) ) return Layer.succeed( ApiClient, new ApiClientImpl({ apiKey, baseUrl, timeout }) ) }) ) // Layer that validates config const ValidatedConfigLive = Layer.unwrapEffect( Effect.gen(function* () { const config = yield* Config.all({ dbUrl: Config.string("DATABASE_URL"), redisUrl: Config.string("REDIS_URL"), port: Config.integer("PORT"), }) // Validate config if (!config.dbUrl.startsWith("postgresql://")) { return yield* Effect.fail(new ConfigError({ message: "Invalid DATABASE_URL" })) } return Layer.succeed(AppConfig, config) }) ) ``` ## Scoped Layers For resources that need cleanup: ```typescript import { Effect, Layer, Scope } from "effect" // Resource that needs cleanup const DatabaseConnectionLive = Layer.scoped( DatabaseConnection, Effect.acquireRelease( 
Effect.gen(function* () { const pool = yield* createPool(config) yield* Effect.log("Database pool created") return pool }), (pool) => Effect.gen(function* () { yield* pool.end() yield* Effect.log("Database pool closed") }).pipe(Effect.orDie) ) ) // Service using scoped resource export class UserRepo extends Effect.Service()("UserRepo", { accessors: true, effect: Effect.gen(function* () { const db = yield* DatabaseConnection return { findById: Effect.fn("UserRepo.findById")(function* (id) { return yield* db.query("SELECT * FROM users WHERE id = $1", [id]) }), } }), }) {} ``` ## Testing Layer Composition ```typescript // test/setup.ts import { Layer } from "effect" export const TestLive = Layer.mergeAll( UserServiceTest, OrderServiceTest, ProductServiceTest, ).pipe( Layer.provide(InMemoryDatabaseLive), ) // test/user.test.ts import { Effect } from "effect" import { TestLive } from "./setup" describe("UserService", () => { it("creates users", async () => { const program = Effect.gen(function* () { const user = yield* UserService.create({ email: "test@example.com", name: "Test User", }) expect(user.email).toBe("test@example.com") }) await Effect.runPromise(program.pipe(Effect.provide(TestLive))) }) }) ``` ## Layer.effect vs Layer.succeed ```typescript // Layer.succeed - for static values (no effects) const ConfigLive = Layer.succeed(AppConfig, { port: 3000, env: "development", }) // Layer.effect - when construction needs effects const LoggerLive = Layer.effect( Logger, Effect.gen(function* () { const config = yield* AppConfig const transport = config.env === "production" ? 
createCloudTransport() : createConsoleTransport() return new LoggerImpl(transport) }) ) ``` ## Lazy Layers For expensive initialization that should be deferred: ```typescript const ExpensiveServiceLive = Layer.lazy(() => { // This code runs only when the layer is first used return Layer.effect( ExpensiveService, Effect.gen(function* () { yield* Effect.log("Initializing expensive service...") const client = yield* createExpensiveClient() return new ExpensiveServiceImpl(client) }) ) }) ``` ================================================ FILE: agents/skills/effect-best-practices/references/observability-patterns.md ================================================ # Observability Patterns ## Structured Logging with Effect.log **Always use Effect.log** instead of console.log. Effect.log provides: - Structured data - Log levels - Integration with telemetry systems - Testability ### Basic Logging ```typescript // Simple message yield* Effect.log("Processing started") // With structured data yield* Effect.log("Processing order", { orderId, userId, amount, currency, }) // Different log levels yield* Effect.logDebug("Cache lookup", { key, hit: true }) yield* Effect.logInfo("User logged in", { userId }) yield* Effect.logWarning("Rate limit approaching", { current: 95, limit: 100 }) yield* Effect.logError("Payment failed", { orderId, reason: error.message }) yield* Effect.logFatal("Database connection lost") ``` ### Logging in Services ```typescript const processOrder = Effect.fn("OrderService.processOrder")(function* (input: OrderInput) { yield* Effect.log("Starting order processing", { orderId: input.orderId }) const result = yield* validateAndProcess(input).pipe( Effect.tap(() => Effect.log("Order processed successfully")), Effect.tapError((err) => Effect.logError("Order processing failed", { orderId: input.orderId, error: err._tag, message: err.message, }) ), ) return result }) ``` ## Effect.fn for Automatic Tracing **Always use Effect.fn** for service methods. 
This automatically creates spans with proper names: ```typescript // Creates span: "UserService.findById" const findById = Effect.fn("UserService.findById")(function* (id: UserId) { // Automatic span creation with: // - Start/end timing // - Error capture // - Parameter tracking (if annotated) }) // Creates span: "PaymentService.processPayment" const processPayment = Effect.fn("PaymentService.processPayment")( function* (orderId: OrderId, amount: number) { // ... } ) ``` ### Naming Convention Use `ServiceName.methodName` format consistently: - `UserService.findById` - `OrderService.create` - `PaymentService.refund` - `NotificationService.sendEmail` ## Span Annotations Add important context to spans, but don't overdo it: ```typescript const processOrder = Effect.fn("OrderService.process")(function* (orderId: OrderId) { // GOOD - Important business identifiers yield* Effect.annotateCurrentSpan("orderId", orderId) yield* Effect.annotateCurrentSpan("userId", order.userId) yield* Effect.annotateCurrentSpan("totalAmount", order.total) // BAD - Too much detail, creates noise // yield* Effect.annotateCurrentSpan("step", "validating") // yield* Effect.annotateCurrentSpan("itemCount", order.items.length) // yield* Effect.annotateCurrentSpan("item0Name", order.items[0].name) }) ``` ### What to Annotate **Do annotate:** - Entity IDs (orderId, userId, etc.) 
- Important business values (amounts, statuses) - Error context when failing **Don't annotate:** - Step-by-step progress - Individual item details - Internal implementation state - Sensitive data (PII, secrets) ## Metrics ### Counter ```typescript import { Metric } from "effect" // Define metrics at module level const ordersProcessed = Metric.counter("orders_processed", { description: "Total orders processed", }) const ordersFailed = Metric.counter("orders_failed", { description: "Total orders that failed processing", }) // Use in service const processOrder = Effect.fn("OrderService.process")(function* (input: OrderInput) { return yield* process(input).pipe( Effect.tap(() => Metric.increment(ordersProcessed)), Effect.tapError(() => Metric.increment(ordersFailed)), ) }) ``` ### Counter with Tags ```typescript const httpRequests = Metric.counter("http_requests_total", { description: "Total HTTP requests", }) // Tag with method and status yield* Metric.increment(httpRequests).pipe( Metric.tagged("method", request.method), Metric.tagged("status", String(response.status)), Metric.tagged("path", request.path), ) ``` ### Gauge ```typescript const activeConnections = Metric.gauge("active_connections", { description: "Number of active connections", }) // Update gauge yield* Metric.set(activeConnections, connectionCount) // Or increment/decrement yield* Metric.increment(activeConnections) yield* Metric.decrement(activeConnections) ``` ### Histogram ```typescript const requestDuration = Metric.histogram("request_duration_ms", { description: "Request duration in milliseconds", boundaries: [10, 50, 100, 250, 500, 1000, 2500, 5000], }) // Record value yield* Metric.update(requestDuration, durationMs) // Or use timer helper const timedEffect = effect.pipe( Metric.timerWithHistogram(requestDuration), ) ``` ## Configuration with Config **Always use Config** instead of process.env: ### Basic Config ```typescript import { Config, Effect } from "effect" const config = Config.all({ 
port: Config.integer("PORT").pipe(Config.withDefault(3000)), host: Config.string("HOST").pipe(Config.withDefault("localhost")), env: Config.literal("development", "staging", "production")("NODE_ENV"), }) // Use in layer const ServerLive = Layer.unwrapEffect( Effect.gen(function* () { const { port, host, env } = yield* config return Layer.succeed(ServerConfig, { port, host, env }) }) ) ``` ### Config with Validation ```typescript const dbConfig = Config.all({ host: Config.string("DB_HOST"), port: Config.integer("DB_PORT").pipe( Config.validate({ message: "Port must be between 1 and 65535", validation: (p) => p >= 1 && p <= 65535, }) ), database: Config.string("DB_NAME"), maxConnections: Config.integer("DB_MAX_CONNECTIONS").pipe( Config.withDefault(10), Config.validate({ message: "Max connections must be positive", validation: (n) => n > 0, }) ), }) ``` ### Secret Config ```typescript // For sensitive values that shouldn't be logged const secretConfig = Config.all({ apiKey: Config.secret("API_KEY"), // Returns Secret dbPassword: Config.secret("DB_PASSWORD"), }) // Using secrets const program = Effect.gen(function* () { const { apiKey, dbPassword } = yield* secretConfig // Secret values are wrapped - use Secret.value to unwrap const key = Secret.value(apiKey) // Logging a Secret shows "[REDACTED]" yield* Effect.log("Config loaded", { apiKey }) // Safe - shows [REDACTED] }) ``` ### Config with Nested Structure ```typescript const appConfig = Config.all({ server: Config.all({ port: Config.integer("SERVER_PORT"), host: Config.string("SERVER_HOST"), }), database: Config.all({ url: Config.string("DATABASE_URL"), pool: Config.integer("DATABASE_POOL_SIZE").pipe(Config.withDefault(10)), }), features: Config.all({ enableBeta: Config.boolean("ENABLE_BETA").pipe(Config.withDefault(false)), maxUploadSize: Config.integer("MAX_UPLOAD_SIZE").pipe(Config.withDefault(10485760)), }), }) ``` ## Log Level Configuration ```typescript import { Logger, LogLevel } from "effect" // Set log 
level via config const LoggerLive = Layer.unwrapEffect( Effect.gen(function* () { const level = yield* Config.literal( "debug", "info", "warning", "error" )("LOG_LEVEL").pipe(Config.withDefault("info")) const logLevel = { debug: LogLevel.Debug, info: LogLevel.Info, warning: LogLevel.Warning, error: LogLevel.Error, }[level] return Logger.minimumLogLevel(logLevel) }) ) // Production: structured JSON logging const JsonLoggerLive = Logger.json ``` ## Combining Observability ```typescript const processOrder = Effect.fn("OrderService.process")(function* (input: OrderInput) { const startTime = yield* Effect.clockWith((clock) => clock.currentTimeMillis) // Annotate span yield* Effect.annotateCurrentSpan("orderId", input.orderId) yield* Effect.annotateCurrentSpan("userId", input.userId) // Log start yield* Effect.log("Processing order", { orderId: input.orderId }) const result = yield* process(input).pipe( Effect.tap((order) => Effect.gen(function* () { const endTime = yield* Effect.clockWith((c) => c.currentTimeMillis) const duration = endTime - startTime // Record metric yield* Metric.update(orderProcessingDuration, duration) yield* Metric.increment(ordersProcessed) // Log completion yield* Effect.log("Order processed", { orderId: input.orderId, durationMs: duration, }) }) ), Effect.tapError((err) => Effect.gen(function* () { yield* Metric.increment(ordersFailed) yield* Effect.logError("Order processing failed", { orderId: input.orderId, error: err._tag, }) }) ), ) return result }) ``` ================================================ FILE: agents/skills/effect-best-practices/references/rpc-cluster-patterns.md ================================================ # RPC & Cluster Patterns ## RpcGroup for API Organization **Use `RpcGroup.make`** to organize related RPC endpoints: ```typescript import { Rpc, RpcGroup } from "@effect/rpc" import { Schema } from "effect" // Group related operations export const UserRpc = RpcGroup.make("User", { // Queries (read operations) findById: 
Rpc.query({ input: UserId, output: User, error: UserNotFoundError, }), list: Rpc.query({ input: Schema.Struct({ organizationId: OrganizationId, limit: Schema.optionalWith(Schema.Number, { default: () => 50 }), offset: Schema.optionalWith(Schema.Number, { default: () => 0 }), }), output: Schema.Array(User), error: Schema.Never, }), // Mutations (write operations) create: Rpc.mutation({ input: CreateUserInput, output: User, error: Schema.Union(UserCreateError, ValidationError), }), update: Rpc.mutation({ input: Schema.Struct({ id: UserId, data: UpdateUserInput, }), output: User, error: Schema.Union(UserNotFoundError, ValidationError), }), delete: Rpc.mutation({ input: UserId, output: Schema.Void, error: UserNotFoundError, }), }) ``` ### Query vs Mutation - **Rpc.query** - Read operations, idempotent, cacheable - **Rpc.mutation** - Write operations, may have side effects ```typescript // Query - safe to retry, can be cached findById: Rpc.query({ ... }), search: Rpc.query({ ... }), list: Rpc.query({ ... }), // Mutation - may modify state create: Rpc.mutation({ ... }), update: Rpc.mutation({ ... }), delete: Rpc.mutation({ ... 
}), ``` ## Error Unions in RPC **Always use explicit error unions** for RPC error types: ```typescript // Explicit union of possible errors create: Rpc.mutation({ input: CreateOrderInput, output: Order, error: Schema.Union( ValidationError, InsufficientInventoryError, PaymentFailedError, UserNotFoundError, ), }), // NOT - generic error type create: Rpc.mutation({ input: CreateOrderInput, output: Order, error: GenericError, // WRONG - loses type information }), ``` ## RPC Middleware for Authentication ```typescript import { RpcMiddleware, Rpc } from "@effect/rpc" import { Effect, Layer } from "effect" // Context type for authenticated user export class CurrentUser extends Context.Tag("CurrentUser")< CurrentUser, { id: UserId; role: UserRole; organizationId: OrganizationId } >() {} // Auth middleware - extracts and validates auth export class AuthMiddleware extends RpcMiddleware.Tag()( "AuthMiddleware", { provides: CurrentUser, failure: UnauthorizedError, } ) {} // Middleware implementation export const AuthMiddlewareLive = Layer.effect( AuthMiddleware, Effect.gen(function* () { const authService = yield* AuthService return AuthMiddleware.of({ execute: (request) => Effect.gen(function* () { const token = request.headers.get("authorization")?.replace("Bearer ", "") if (!token) { return yield* Effect.fail(new UnauthorizedError({ message: "Missing token" })) } const user = yield* authService.validateToken(token).pipe( Effect.catchTag("TokenExpiredError", () => Effect.fail(new UnauthorizedError({ message: "Token expired" })) ), Effect.catchTag("TokenInvalidError", () => Effect.fail(new UnauthorizedError({ message: "Invalid token" })) ), ) return user }), }) }) ) // Protected RPC using middleware export const ProtectedUserRpc = UserRpc.middleware(AuthMiddleware) ``` ## Workflow Definition **Use `Workflow.make`** with explicit idempotency keys: ```typescript import { Workflow } from "@effect/cluster" import { Schema } from "effect" export const OrderFulfillmentWorkflow = 
Workflow.make({ name: "OrderFulfillmentWorkflow", payload: { id: OrderId, // Execution ID orderId: OrderId, userId: UserId, items: Schema.Array(OrderItem), shippingAddress: ShippingAddress, }, // Idempotency key prevents duplicate processing idempotencyKey: ({ orderId }) => orderId, }) export const NotificationWorkflow = Workflow.make({ name: "NotificationWorkflow", payload: { id: Schema.String, // Unique execution ID messageId: MessageId, channelId: ChannelId, authorId: UserId, }, idempotencyKey: ({ messageId }) => messageId, }) ``` ### Workflow Implementation ```typescript import { Activity } from "@effect/workflow" import { Effect } from "effect" export const OrderFulfillmentWorkflowLayer = OrderFulfillmentWorkflow.toLayer( Effect.fn("OrderFulfillmentWorkflow")(function* (payload) { // Step 1: Reserve inventory const reservation = yield* Activity.make({ name: "ReserveInventory", success: InventoryReservation, error: Schema.Union(InsufficientInventoryError, DatabaseError), execute: Effect.gen(function* () { const inventory = yield* InventoryService return yield* inventory.reserve(payload.items) }), }) // Step 2: Process payment const payment = yield* Activity.make({ name: "ProcessPayment", success: PaymentResult, error: Schema.Union(PaymentFailedError, PaymentTimeoutError), execute: Effect.gen(function* () { const payments = yield* PaymentService return yield* payments.charge(payload.userId, payload.items) }), }) // Step 3: Create shipment const shipment = yield* Activity.make({ name: "CreateShipment", success: Shipment, error: Schema.Union(ShippingError, AddressInvalidError), execute: Effect.gen(function* () { const shipping = yield* ShippingService return yield* shipping.createShipment({ items: payload.items, address: payload.shippingAddress, reservationId: reservation.id, }) }), }) // Step 4: Send confirmation yield* Activity.make({ name: "SendConfirmation", success: Schema.Void, error: NotificationError, execute: Effect.gen(function* () { const notifications 
= yield* NotificationService yield* notifications.sendOrderConfirmation({ userId: payload.userId, orderId: payload.orderId, trackingNumber: shipment.trackingNumber, }) }), }) return { shipment, payment } }) ) ``` ## Activity Patterns **Always include `success` and `error` schemas** in Activity.make: ```typescript // CORRECT - schemas specified yield* Activity.make({ name: "SendEmail", success: EmailSentResult, error: Schema.Union(EmailDeliveryError, EmailTemplateError), execute: Effect.gen(function* () { // Implementation return { messageId: "msg-123", sentAt: new Date() } }), }) // WRONG - missing schemas yield* Activity.make({ name: "SendEmail", execute: Effect.gen(function* () { // This will not serialize properly across workflow restarts }), }) ``` ### Activity Error Handling with Retryable ```typescript export class ExternalApiError extends Schema.TaggedError()( "ExternalApiError", { message: Schema.String, statusCode: Schema.Number, retryable: Schema.Boolean, }, ) { static fromResponse(response: Response): ExternalApiError { return new ExternalApiError({ message: `API error: ${response.statusText}`, statusCode: response.status, retryable: response.status >= 500, // 5xx errors are retryable }) } } yield* Activity.make({ name: "CallExternalApi", success: ApiResponse, error: ExternalApiError, execute: Effect.gen(function* () { const response = yield* fetch(url) if (!response.ok) { return yield* Effect.fail(ExternalApiError.fromResponse(response)) } return yield* response.json() }), }) ``` ## ClusterCron for Scheduled Jobs ```typescript import { ClusterCron } from "@effect/cluster" export const DailyReportCron = ClusterCron.make({ name: "DailyReportCron", // Cron expression: every day at 6 AM UTC schedule: "0 6 * * *", }) // Implementation export const DailyReportCronLayer = DailyReportCron.toLayer( Effect.fn("DailyReportCron")(function* () { yield* Effect.log("Starting daily report generation") const reports = yield* ReportService yield* 
reports.generateDailyReport() yield* Effect.log("Daily report generation complete") }) ) ``` ## Triggering Workflows ### From HTTP Handler ```typescript import { HttpApi, HttpApiEndpoint } from "@effect/platform" const createOrder = HttpApiEndpoint.post("createOrder", "/orders") .setPayload(CreateOrderInput) .addSuccess(Order) .addError(ValidationError) // Handler triggers workflow const createOrderHandler = Effect.gen(function* () { const input = yield* HttpApi.payload const workflowClient = yield* WorkflowClient // Create order in database const order = yield* OrderService.create(input) // Trigger async fulfillment workflow yield* workflowClient.workflows.OrderFulfillmentWorkflow.execute({ id: order.id, orderId: order.id, userId: input.userId, items: input.items, shippingAddress: input.shippingAddress, }) return order }) ``` ### From Backend Service ```typescript export class MessageService extends Effect.Service()("MessageService", { accessors: true, dependencies: [MessageRepo.Default, WorkflowClient.Default], effect: Effect.gen(function* () { const repo = yield* MessageRepo const workflows = yield* WorkflowClient const create = Effect.fn("MessageService.create")(function* (input: CreateMessageInput) { const message = yield* repo.create(input) // Trigger notification workflow yield* workflows.workflows.NotificationWorkflow.execute({ id: message.id, messageId: message.id, channelId: message.channelId, authorId: message.authorId, }) return message }) return { create } }), }) {} ``` ## Workflow HTTP API ```typescript // Expose workflow execution via HTTP const executeWorkflow = HttpApiEndpoint.post("executeWorkflow", "/workflows/:name/execute") .setPath(Schema.Struct({ name: Schema.String })) .setPayload(Schema.Unknown) .addSuccess(Schema.Struct({ executionId: Schema.String })) .addError(WorkflowNotFoundError) // Handler const executeWorkflowHandler = Effect.gen(function* () { const { name } = yield* HttpApi.path const payload = yield* HttpApi.payload const client 
= yield* WorkflowClient const workflow = client.workflows[name] if (!workflow) { return yield* Effect.fail(new WorkflowNotFoundError({ name })) } const result = yield* workflow.execute(payload) return { executionId: result.executionId } }) ``` ================================================ FILE: agents/skills/effect-best-practices/references/schema-patterns.md ================================================ # Schema Patterns ## Branded Types for IDs **Always brand entity IDs** to prevent accidentally passing the wrong ID type: ```typescript import { Schema } from "effect" // Entity IDs - always branded with namespace export const UserId = Schema.UUID.pipe(Schema.brand("@App/UserId")) export type UserId = Schema.Schema.Type<typeof UserId> export const OrganizationId = Schema.UUID.pipe(Schema.brand("@App/OrganizationId")) export type OrganizationId = Schema.Schema.Type<typeof OrganizationId> export const OrderId = Schema.UUID.pipe(Schema.brand("@App/OrderId")) export type OrderId = Schema.Schema.Type<typeof OrderId> export const ProductId = Schema.UUID.pipe(Schema.brand("@App/ProductId")) export type ProductId = Schema.Schema.Type<typeof ProductId> ``` ### Branding Convention Use `@Namespace/EntityName` format: - `@App/UserId` - Main application entities - `@Billing/InvoiceId` - Billing domain entities - `@External/StripeCustomerId` - External system IDs ### Creating Branded Values ```typescript // From string (validates UUID format) const userId = Schema.decodeSync(UserId)("123e4567-e89b-12d3-a456-426614174000") // Generate new ID const newUserId = UserId.make(crypto.randomUUID()) // Type error - can't mix ID types const order = yield* orderService.findById(userId) // Error: UserId is not OrderId ``` ### When NOT to Brand Don't brand simple strings that don't need type safety: ```typescript // NOT branded - acceptable export const Url = Schema.String export const FilePath = Schema.String export const EmailAddress = Schema.String.pipe(Schema.pattern(/^[^\s@]+@[^\s@]+\.[^\s@]+$/)) // These don't need branding because: // 1.
They don't cross service boundaries in ways that could be confused // 2. They're typically validated by format, not by type ``` ## Schema.Struct for Domain Types **Prefer Schema.Struct** over TypeScript interfaces for domain types: ```typescript // CORRECT - Schema.Struct export const User = Schema.Struct({ id: UserId, email: Schema.String, name: Schema.String, organizationId: OrganizationId, role: Schema.Literal("admin", "member", "viewer"), createdAt: Schema.DateTimeUtc, updatedAt: Schema.DateTimeUtc, }) export type User = Schema.Schema.Type // Can derive encoded type for database/API export type UserEncoded = Schema.Schema.Encoded ``` ### Input Types for Mutations ```typescript export const CreateUserInput = Schema.Struct({ email: Schema.String.pipe( Schema.pattern(/^[^\s@]+@[^\s@]+\.[^\s@]+$/), Schema.annotations({ description: "Valid email address" }), ), name: Schema.String.pipe( Schema.minLength(1), Schema.maxLength(100), ), organizationId: OrganizationId, role: Schema.optionalWith( Schema.Literal("admin", "member", "viewer"), { default: () => "member" as const } ), }) export type CreateUserInput = Schema.Schema.Type export const UpdateUserInput = Schema.Struct({ name: Schema.optional(Schema.String.pipe(Schema.minLength(1))), role: Schema.optional(Schema.Literal("admin", "member", "viewer")), }) export type UpdateUserInput = Schema.Schema.Type ``` ## Schema.transform and transformOrFail **Use transforms** instead of manual parsing: ```typescript // Transform string to Date export const DateFromString = Schema.transform( Schema.String, Schema.DateTimeUtc, { decode: (s) => new Date(s), encode: (d) => d.toISOString(), } ) // Transform with validation (can fail) export const PositiveNumber = Schema.transformOrFail( Schema.Number, Schema.Number.pipe(Schema.brand("PositiveNumber")), { decode: (n, _, ast) => n > 0 ? 
ParseResult.succeed(n as Schema.Schema.Type) : ParseResult.fail(new ParseResult.Type(ast, n, "Must be positive")), encode: ParseResult.succeed, } ) ``` ### Common Transforms ```typescript // JSON string to object export const JsonFromString = (schema: Schema.Schema) => Schema.transform( Schema.String, schema, { decode: (s) => JSON.parse(s), encode: (a) => JSON.stringify(a), } ) // Comma-separated string to array export const CommaSeparatedList = Schema.transform( Schema.String, Schema.Array(Schema.String), { decode: (s) => s.split(",").map((x) => x.trim()).filter(Boolean), encode: (arr) => arr.join(","), } ) // Cents to dollars export const DollarsFromCents = Schema.transform( Schema.Number.pipe(Schema.int()), Schema.Number, { decode: (cents) => cents / 100, encode: (dollars) => Math.round(dollars * 100), } ) ``` ## Schema.Class for Entities with Methods Use `Schema.Class` when entities need methods: ```typescript export class User extends Schema.Class("User")({ id: UserId, email: Schema.String, name: Schema.String, role: Schema.Literal("admin", "member", "viewer"), createdAt: Schema.DateTimeUtc, }) { get isAdmin(): boolean { return this.role === "admin" } get displayName(): string { return this.name || this.email.split("@")[0] } canAccessResource(resource: Resource): boolean { if (this.isAdmin) return true return resource.ownerId === this.id } } // Usage const user = new User({ id: UserId.make(crypto.randomUUID()), email: "alice@example.com", name: "Alice", role: "member", createdAt: new Date(), }) console.log(user.displayName) // "Alice" console.log(user.isAdmin) // false ``` ## Schema.annotations Add annotations for documentation and validation messages: ```typescript export const CreateOrderInput = Schema.Struct({ productId: ProductId.pipe( Schema.annotations({ description: "The product to order" }), ), quantity: Schema.Number.pipe( Schema.int(), Schema.positive(), Schema.annotations({ description: "Number of items to order", examples: [1, 5, 10], }), ), 
shippingAddress: Schema.Struct({ line1: Schema.String.pipe(Schema.annotations({ description: "Street address" })), line2: Schema.optional(Schema.String), city: Schema.String, state: Schema.String.pipe(Schema.length(2)), zip: Schema.String.pipe(Schema.pattern(/^\d{5}(-\d{4})?$/)), }).pipe(Schema.annotations({ description: "Shipping destination" })), }).pipe( Schema.annotations({ title: "Create Order Input", description: "Input for creating a new order", }), ) ``` ## Optional Fields Use `Schema.optional` and `Schema.optionalWith`: ```typescript export const UserPreferences = Schema.Struct({ // Optional, undefined if not provided theme: Schema.optional(Schema.Literal("light", "dark")), // Optional with default value language: Schema.optionalWith(Schema.String, { default: () => "en" }), // Optional with null support (for database compatibility) bio: Schema.NullOr(Schema.String), // Optional but must be present if set (no undefined) timezone: Schema.optional(Schema.String, { exact: true }), }) ``` ## Union Types and Discriminated Unions ```typescript // Simple union export const PaymentMethod = Schema.Union( Schema.Literal("card"), Schema.Literal("bank_transfer"), Schema.Literal("crypto"), ) // Discriminated union (tagged) export const PaymentDetails = Schema.Union( Schema.Struct({ _tag: Schema.Literal("Card"), cardNumber: Schema.String, expiry: Schema.String, cvv: Schema.String, }), Schema.Struct({ _tag: Schema.Literal("BankTransfer"), accountNumber: Schema.String, routingNumber: Schema.String, }), Schema.Struct({ _tag: Schema.Literal("Crypto"), walletAddress: Schema.String, network: Schema.Literal("ethereum", "bitcoin", "solana"), }), ) export type PaymentDetails = Schema.Schema.Type // Usage with match const processPayment = (details: PaymentDetails) => { switch (details._tag) { case "Card": return processCard(details.cardNumber, details.expiry, details.cvv) case "BankTransfer": return processBankTransfer(details.accountNumber, details.routingNumber) case "Crypto": 
return processCrypto(details.walletAddress, details.network) } } ``` ## Enums and Literals ```typescript // Use Literal for small, fixed sets export const UserRole = Schema.Literal("admin", "member", "viewer") export type UserRole = Schema.Schema.Type<typeof UserRole> // Use Enums for larger sets or when you need runtime values export const OrderStatus = Schema.Enums({ Pending: "pending", Processing: "processing", Shipped: "shipped", Delivered: "delivered", Cancelled: "cancelled", } as const) export type OrderStatus = Schema.Schema.Type<typeof OrderStatus> ``` ## Recursive Schemas ```typescript interface Category { id: string name: string children: readonly Category[] } export const Category: Schema.Schema<Category> = Schema.Struct({ id: Schema.String, name: Schema.String, children: Schema.Array(Schema.suspend(() => Category)), }) ``` ## Decoding and Encoding ```typescript // Decode (parse) - use in services const parseUser = Schema.decodeUnknown(User) const result = yield* parseUser(rawData) // Effect<User, ParseError> // Decode sync - only in controlled contexts const user = Schema.decodeUnknownSync(User)(rawData) // Encode - for serialization const encodeUser = Schema.encode(User) const encoded = yield* encodeUser(user) // Effect<UserEncoded, ParseError> ``` ================================================ FILE: agents/skills/effect-best-practices/references/service-patterns.md ================================================ # Service Patterns ## Effect.Service Over Context.Tag **Always prefer `Effect.Service`** for defining business logic services. This is the modern, recommended approach that provides: 1. **Built-in `Default` layer** - No manual layer creation needed 2. **Automatic accessors** - Direct method calls via `ServiceName.method()` 3. **Proper dependency declaration** - Dependencies are explicit and type-checked 4.
**Consistent structure** - All services follow the same pattern ### Basic Service Definition ```typescript import { Effect, Layer } from "effect" export class UserService extends Effect.Service()("UserService", { accessors: true, effect: Effect.gen(function* () { const findById = Effect.fn("UserService.findById")(function* (id: UserId) { // Implementation }) const findByEmail = Effect.fn("UserService.findByEmail")(function* (email: string) { // Implementation }) const create = Effect.fn("UserService.create")(function* (input: CreateUserInput) { // Implementation }) return { findById, findByEmail, create } }), }) {} ``` ### Service with Dependencies **Critical:** Always declare dependencies using the `dependencies` array. This ensures: - Dependencies are automatically provided when using `ServiceName.Default` - Type errors if dependencies are missing - No manual `Layer.provide` at usage sites ```typescript export class OrderService extends Effect.Service()("OrderService", { accessors: true, dependencies: [ UserService.Default, ProductService.Default, InventoryService.Default, ], effect: Effect.gen(function* () { // Dependencies are automatically available const users = yield* UserService const products = yield* ProductService const inventory = yield* InventoryService const create = Effect.fn("OrderService.create")(function* (input: CreateOrderInput) { // Validate user exists const user = yield* users.findById(input.userId) // Check product availability const product = yield* products.findById(input.productId) const available = yield* inventory.checkAvailability(input.productId, input.quantity) if (!available) { return yield* Effect.fail(new InsufficientInventoryError({ productId: input.productId, message: "Not enough inventory", })) } // Create order... 
}) return { create } }), }) {} ``` ### Wrong: Leaking Dependencies ```typescript // WRONG - Dependencies not declared, must be provided manually export class OrderService extends Effect.Service()("OrderService", { accessors: true, effect: Effect.gen(function* () { const users = yield* UserService // Dependency not in `dependencies` array! // ... }), }) {} // Now every usage site must do this: const program = OrderService.create(input).pipe( Effect.provide(UserService.Default), // Annoying and error-prone ) ``` ## Effect.fn for Tracing **Always wrap service methods with `Effect.fn`**. This provides automatic tracing with meaningful span names. ### Naming Convention Use `ServiceName.methodName` format for span names: ```typescript const findById = Effect.fn("UserService.findById")(function* (id: UserId) { yield* Effect.annotateCurrentSpan("userId", id) // Implementation }) const processPayment = Effect.fn("PaymentService.processPayment")( function* (orderId: OrderId, amount: number, currency: string) { yield* Effect.annotateCurrentSpan("orderId", orderId) yield* Effect.annotateCurrentSpan("amount", amount) yield* Effect.annotateCurrentSpan("currency", currency) // Implementation } ) ``` ### Annotating Spans Add important context to spans, but don't overdo it: ```typescript // CORRECT - Important business identifiers yield* Effect.annotateCurrentSpan("userId", userId) yield* Effect.annotateCurrentSpan("orderId", orderId) yield* Effect.annotateCurrentSpan("amount", amount) // WRONG - Too much detail, noise in traces yield* Effect.annotateCurrentSpan("userEmail", user.email) yield* Effect.annotateCurrentSpan("userName", user.name) yield* Effect.annotateCurrentSpan("userCreatedAt", user.createdAt) yield* Effect.annotateCurrentSpan("step", "validating") yield* Effect.annotateCurrentSpan("step", "processing") yield* Effect.annotateCurrentSpan("step", "completing") ``` ## When Context.Tag is Acceptable `Context.Tag` is appropriate **only** for infrastructure that's injected 
at runtime: ### Cloudflare Worker Bindings ```typescript import { Context } from "effect" // These are provided by the runtime, not created by our code export class KVNamespace extends Context.Tag("KVNamespace")< KVNamespace, CloudflareKVNamespace >() {} export class R2Bucket extends Context.Tag("R2Bucket")< R2Bucket, CloudflareR2Bucket >() {} // In the worker entry point const handler = { fetch(request: Request, env: Env) { return program.pipe( Effect.provideService(KVNamespace, env.MY_KV), Effect.provideService(R2Bucket, env.MY_BUCKET), Effect.runPromise, ) } } ``` ### Database/Redis Clients (Infrastructure) ```typescript // Infrastructure provided at app root - acceptable as Context.Tag // But prefer using @effect/sql or similar typed clients import { PgClient } from "@effect/sql-pg" // PgClient is already a Context.Tag from the library // Just provide it at the app root const DatabaseLive = PgClient.layer({ host: Config.string("DB_HOST"), port: Config.integer("DB_PORT"), database: Config.string("DB_NAME"), // ... }) ``` ## Single Responsibility Each service should have a focused responsibility: ```typescript // CORRECT - Focused services export class UserService extends Effect.Service()("UserService", { /* user operations */ }) {} export class AuthService extends Effect.Service()("AuthService", { /* auth operations */ }) {} export class NotificationService extends Effect.Service()("NotificationService", { /* notifications */ }) {} // WRONG - God service doing everything export class AppService extends Effect.Service()("AppService", { effect: Effect.gen(function* () { return { createUser, deleteUser, login, logout, sendEmail, sendPush, processPayment, // ... 50 more methods } }), }) {} ``` ## Service Interface Patterns ### Return Types Services should return `Effect` types, never `Promise`: ```typescript // CORRECT const findById = Effect.fn("UserService.findById")( function* (id: UserId): Effect.Effect { // ... 
} ) // WRONG - Promise in service interface const findById = async (id: UserId): Promise<User> => { // ... } ``` ### Use Option for Nullable Results ```typescript // CORRECT - findById can fail, findByIdOption returns Option const findById = Effect.fn("UserService.findById")( function* (id: UserId): Effect.Effect<User, UserNotFoundError> { const maybeUser = yield* repo.findById(id) return yield* Option.match(maybeUser, { onNone: () => Effect.fail(new UserNotFoundError({ userId: id, message: "Not found" })), onSome: Effect.succeed, }) } ) const findByIdOption = Effect.fn("UserService.findByIdOption")( function* (id: UserId): Effect.Effect<Option.Option<User>> { return yield* repo.findById(id) } ) ``` ## Testing Services Create test implementations using the same pattern: ```typescript // Test implementation export const UserServiceTest = Layer.succeed( UserService, UserService.of({ findById: (id) => Effect.succeed(mockUser), create: (input) => Effect.succeed({ ...mockUser, ...input }), }) ) // Or with Effect.Service for stateful mocks export class UserServiceTest extends Effect.Service<UserServiceTest>()("UserService", { accessors: true, effect: Effect.gen(function* () { const users = new Map<UserId, User>() const findById = Effect.fn("UserService.findById")(function* (id: UserId) { const user = users.get(id) if (!user) return yield* Effect.fail(new UserNotFoundError({ userId: id, message: "Not found" })) return user }) const create = Effect.fn("UserService.create")(function* (input: CreateUserInput) { const user = { id: UserId.make(crypto.randomUUID()), ...input } users.set(user.id, user) return user }) return { findById, create } }), }) {} ``` ================================================ FILE: agents/skills/emil-design-eng/SKILL.md ================================================ --- name: emil-design-eng description: This skill encodes Emil Kowalski's philosophy on UI polish, component design, animation decisions, and the invisible details that make software feel great.
--- # Design Engineering You are a design engineer with the craft sensibility. You build interfaces where every detail compounds into something that feels right. You understand that in a world where everyone's software is good enough, taste is the differentiator. ## Core Philosophy ### Taste is trained, not innate Good taste is not personal preference. It is a trained instinct: the ability to see beyond the obvious and recognize what elevates. You develop it by surrounding yourself with great work, thinking deeply about why something feels good, and practicing relentlessly. When building UI, don't just make it work. Study why the best interfaces feel the way they do. Reverse engineer animations. Inspect interactions. Be curious. ### Unseen details compound Most details users never consciously notice. That is the point. When a feature functions exactly as someone assumes it should, they proceed without giving it a second thought. That is the goal. > "All those unseen details combine to produce something that's just stunning, like a thousand barely audible voices all singing in tune." - Paul Graham Every decision below exists because the aggregate of invisible correctness creates interfaces people love without knowing why. ### Beauty is leverage People select tools based on the overall experience, not just functionality. Good defaults and good animations are real differentiators. Beauty is underutilized in software. Use it as leverage to stand out. ## Review Format (Required) When reviewing UI code, you MUST use a markdown table with Before/After columns. Do NOT use a list with "Before:" and "After:" on separate lines. 
Always output an actual markdown table like this: | Before | After | Why | | --- | --- | --- | | `transition: all 300ms` | `transition: transform 200ms ease-out` | Specify exact properties; avoid `all` | | `transform: scale(0)` | `transform: scale(0.95); opacity: 0` | Nothing in the real world appears from nothing | | `ease-in` on dropdown | `ease-out` with custom curve | `ease-in` feels sluggish; `ease-out` gives instant feedback | | No `:active` state on button | `transform: scale(0.97)` on `:active` | Buttons must feel responsive to press | | `transform-origin: center` on popover | `transform-origin: var(--radix-popover-content-transform-origin)` | Popovers should scale from their trigger (not modals — modals stay centered) | Wrong format (never do this): ``` Before: transition: all 300ms After: transition: transform 200ms ease-out ──────────────────────────── Before: scale(0) After: scale(0.95) ``` Correct format: A single markdown table with | Before | After | Why | columns, one row per issue found. The "Why" column briefly explains the reasoning. ## The Animation Decision Framework Before writing any animation code, answer these questions in order: ### 1. Should this animate at all? **Ask:** How often will users see this animation? | Frequency | Decision | | ----------------------------------------------------------- | ---------------------------- | | 100+ times/day (keyboard shortcuts, command palette toggle) | No animation. Ever. | | Tens of times/day (hover effects, list navigation) | Remove or drastically reduce | | Occasional (modals, drawers, toasts) | Standard animation | | Rare/first-time (onboarding, feedback forms, celebrations) | Can add delight | **Never animate keyboard-initiated actions.** These actions are repeated hundreds of times daily. Animation makes them feel slow, delayed, and disconnected from the user's actions. Raycast has no open/close animation. That is the optimal experience for something used hundreds of times a day. ### 2. 
What is the purpose? Every animation must have a clear answer to "why does this animate?" Valid purposes: - **Spatial consistency**: toast enters and exits from the same direction, making swipe-to-dismiss feel intuitive - **State indication**: a morphing feedback button shows the state change - **Explanation**: a marketing animation that shows how a feature works - **Feedback**: a button scales down on press, confirming the interface heard the user - **Preventing jarring changes**: elements appearing or disappearing without transition feel broken If the purpose is just "it looks cool" and the user will see it often, don't animate. ### 3. What easing should it use? Is the element entering or exiting? Yes → ease-out (starts fast, feels responsive) No → Is it moving/morphing on screen? Yes → ease-in-out (natural acceleration/deceleration) Is it a hover/color change? Yes → ease Is it constant motion (marquee, progress bar)? Yes → linear Default → ease-out **Critical: use custom easing curves.** The built-in CSS easings are too weak. They lack the punch that makes animations feel intentional. ```css /* Strong ease-out for UI interactions */ --ease-out: cubic-bezier(0.23, 1, 0.32, 1); /* Strong ease-in-out for on-screen movement */ --ease-in-out: cubic-bezier(0.77, 0, 0.175, 1); /* iOS-like drawer curve (from Ionic Framework) */ --ease-drawer: cubic-bezier(0.32, 0.72, 0, 1); ``` **Never use ease-in for UI animations.** It starts slow, which makes the interface feel sluggish and unresponsive. A dropdown with `ease-in` at 300ms _feels_ slower than `ease-out` at the same 300ms, because ease-in delays the initial movement — the exact moment the user is watching most closely. **Easing curve resources:** Don't create curves from scratch. Use [easing.dev](https://easing.dev/) or [easings.co](https://easings.co/) to find stronger custom variants of standard easings. ### 4. How fast should it be? 
| Element | Duration | | ------------------------ | ------------- | | Button press feedback | 100-160ms | | Tooltips, small popovers | 125-200ms | | Dropdowns, selects | 150-250ms | | Modals, drawers | 200-500ms | | Marketing/explanatory | Can be longer | **Rule: UI animations should stay under 300ms.** A 180ms dropdown feels more responsive than a 400ms one. A faster-spinning spinner makes the app feel like it loads faster, even when the load time is identical. ### Perceived performance Speed in animation is not just about feeling snappy — it directly affects how users perceive your app's performance: - A **fast-spinning spinner** makes loading feel faster (same load time, different perception) - A **180ms select** animation feels more responsive than a **400ms** one - **Instant tooltips** after the first one is open (skip delay + skip animation) make the whole toolbar feel faster The perception of speed matters as much as actual speed. Easing amplifies this: `ease-out` at 200ms _feels_ faster than `ease-in` at 200ms because the user sees immediate movement. ## Spring Animations Springs feel more natural than duration-based animations because they simulate real physics. They don't have fixed durations — they settle based on physical parameters. ### When to use springs - Drag interactions with momentum - Elements that should feel "alive" (like Apple's Dynamic Island) - Gestures that can be interrupted mid-animation - Decorative mouse-tracking interactions ### Spring-based mouse interactions Tying visual changes directly to mouse position feels artificial because it lacks motion. Use `useSpring` from Motion (formerly Framer Motion) to interpolate value changes with spring-like behavior instead of updating immediately. 
```jsx import { useSpring } from 'framer-motion'; // Without spring: feels artificial, instant const rotation = mouseX * 0.1; // With spring: feels natural, has momentum const springRotation = useSpring(mouseX * 0.1, { stiffness: 100, damping: 10, }); ``` This works because the animation is **decorative** — it doesn't serve a function. If this were a functional graph in a banking app, no animation would be better. Know when decoration helps and when it hinders. ### Spring configuration **Apple's approach (recommended — easier to reason about):** ```js { type: "spring", duration: 0.5, bounce: 0.2 } ``` **Traditional physics (more control):** ```js { type: "spring", mass: 1, stiffness: 100, damping: 10 } ``` Keep bounce subtle (0.1-0.3) when used. Avoid bounce in most UI contexts. Use it for drag-to-dismiss and playful interactions. ### Interruptibility advantage Springs maintain velocity when interrupted — CSS animations and keyframes restart from zero. This makes springs ideal for gestures users might change mid-motion. When you click an expanded item and quickly press Escape, a spring-based animation smoothly reverses from its current position. ## Component Building Principles ### Buttons must feel responsive Add `transform: scale(0.97)` on `:active`. This gives instant feedback, making the UI feel like it is truly listening to the user. ```css .button { transition: transform 160ms ease-out; } .button:active { transform: scale(0.97); } ``` This applies to any pressable element. The scale should be subtle (0.95-0.98). ### Never animate from scale(0) Nothing in the real world disappears and reappears completely. Elements animating from `scale(0)` look like they come out of nowhere. Start from `scale(0.9)` or higher, combined with opacity. Even a barely-visible initial scale makes the entrance feel more natural, like a balloon that has a visible shape even when deflated. 
```css /* Bad */ .entering { transform: scale(0); } /* Good */ .entering { transform: scale(0.95); opacity: 0; } ``` ### Make popovers origin-aware Popovers should scale in from their trigger, not from center. The default `transform-origin: center` is wrong for almost every popover. **Exception: modals.** Modals should keep `transform-origin: center` because they are not anchored to a specific trigger — they appear centered in the viewport. ```css /* Radix UI */ .popover { transform-origin: var(--radix-popover-content-transform-origin); } /* Base UI */ .popover { transform-origin: var(--transform-origin); } ``` Whether the user notices the difference individually does not matter. In the aggregate, unseen details become visible. They compound. ### Tooltips: skip delay on subsequent hovers Tooltips should delay before appearing to prevent accidental activation. But once one tooltip is open, hovering over adjacent tooltips should open them instantly with no animation. This feels faster without defeating the purpose of the initial delay. ```css .tooltip { transition: transform 125ms ease-out, opacity 125ms ease-out; transform-origin: var(--transform-origin); } .tooltip[data-starting-style], .tooltip[data-ending-style] { opacity: 0; transform: scale(0.97); } /* Skip animation on subsequent tooltips */ .tooltip[data-instant] { transition-duration: 0ms; } ``` ### Use CSS transitions over keyframes for interruptible UI CSS transitions can be interrupted and retargeted mid-animation. Keyframes restart from zero. For any interaction that can be triggered rapidly (adding toasts, toggling states), transitions produce smoother results. 
```css /* Interruptible - good for UI */ .toast { transition: transform 400ms ease; } /* Not interruptible - avoid for dynamic UI */ @keyframes slideIn { from { transform: translateY(100%); } to { transform: translateY(0); } } ``` ### Use blur to mask imperfect transitions When a crossfade between two states feels off despite trying different easings and durations, add subtle `filter: blur(2px)` during the transition. **Why blur works:** Without blur, you see two distinct objects during a crossfade — the old state and the new state overlapping. This looks unnatural. Blur bridges the visual gap by blending the two states together, tricking the eye into perceiving a single smooth transformation instead of two objects swapping. Combine blur with scale-on-press (`scale(0.97)`) for a polished button state transition: ```css .button { transition: transform 160ms ease-out; } .button:active { transform: scale(0.97); } .button-content { transition: filter 200ms ease, opacity 200ms ease; } .button-content.transitioning { filter: blur(2px); opacity: 0.7; } ``` Keep blur under 20px. Heavy blur is expensive, especially in Safari. ### Animate enter states with @starting-style The modern CSS way to animate element entry without JavaScript: ```css .toast { opacity: 1; transform: translateY(0); transition: opacity 400ms ease, transform 400ms ease; @starting-style { opacity: 0; transform: translateY(100%); } } ``` This replaces the common React pattern of using `useEffect` to set `mounted: true` after initial render. Use `@starting-style` when browser support allows; fall back to the `data-mounted` attribute pattern otherwise. ```jsx // Legacy pattern (still works everywhere) useEffect(() => { setMounted(true); }, []); //
``` ## CSS Transform Mastery ### translateY with percentages Percentage values in `translate()` are relative to the element's own size. Use `translateY(100%)` to move an element by its own height, regardless of actual dimensions. This is how Sonner positions toasts and how Vaul hides the drawer before animating in. ```css /* Works regardless of drawer height */ .drawer-hidden { transform: translateY(100%); } /* Works regardless of toast height */ .toast-enter { transform: translateY(-100%); } ``` Prefer percentages over hardcoded pixel values. They are less error-prone and adapt to content. ### scale() scales children too Unlike `width`/`height`, `scale()` also scales an element's children. When scaling a button on press, the font size, icons, and content scale proportionally. This is a feature, not a bug. ### 3D transforms for depth `rotateX()`, `rotateY()` with `transform-style: preserve-3d` create real 3D effects in CSS. Orbiting animations, coin flips, and depth effects are all possible without JavaScript. ```css .wrapper { transform-style: preserve-3d; } @keyframes orbit { from { transform: translate(-50%, -50%) rotateY(0deg) translateZ(72px) rotateY(360deg); } to { transform: translate(-50%, -50%) rotateY(360deg) translateZ(72px) rotateY(0deg); } } ``` ### transform-origin Every element has an anchor point from which transforms execute. The default is center. Set it to match where the trigger lives for origin-aware interactions. ## clip-path for Animation `clip-path` is not just for shapes. It is one of the most powerful animation tools in CSS. ### The inset shape `clip-path: inset(top right bottom left)` defines a rectangular clipping region. Each value "eats" into the element from that side. 
```css /* Fully hidden from right */ .hidden { clip-path: inset(0 100% 0 0); } /* Fully visible */ .visible { clip-path: inset(0 0 0 0); } /* Reveal from left to right */ .overlay { clip-path: inset(0 100% 0 0); transition: clip-path 200ms ease-out; } .button:active .overlay { clip-path: inset(0 0 0 0); transition: clip-path 2s linear; } ``` ### Tabs with perfect color transitions Duplicate the tab list. Style the copy as "active" (different background, different text color). Clip the copy so only the active tab is visible. Animate the clip on tab change. This creates a seamless color transition that timing individual color transitions can never achieve. ### Hold-to-delete pattern Use `clip-path: inset(0 100% 0 0)` on a colored overlay. On `:active`, transition to `inset(0 0 0 0)` over 2s with linear timing. On release, snap back with 200ms ease-out. Add `scale(0.97)` on the button for press feedback. ### Image reveals on scroll Start with `clip-path: inset(0 0 100% 0)` (hidden from bottom). Animate to `inset(0 0 0 0)` when the element enters the viewport. Use `IntersectionObserver` or Framer Motion's `useInView` with `{ once: true, margin: "-100px" }`. ### Comparison sliders Overlay two images. Clip the top one with `clip-path: inset(0 50% 0 0)`. Adjust the right inset value based on drag position. No extra DOM elements needed, fully hardware-accelerated. ## Gesture and Drag Interactions ### Momentum-based dismissal Don't require dragging past a threshold. Calculate velocity: `Math.abs(dragDistance) / elapsedTime`. If velocity exceeds ~0.11, dismiss regardless of distance. A quick flick should be enough. ```js const timeTaken = new Date().getTime() - dragStartTime.current.getTime(); const velocity = Math.abs(swipeAmount) / timeTaken; if (Math.abs(swipeAmount) >= SWIPE_THRESHOLD || velocity > 0.11) { dismiss(); } ``` ### Damping at boundaries When a user drags past the natural boundary (e.g., dragging a drawer up when already at top), apply damping. 
The more they drag, the less the element moves. Things in real life don't suddenly stop; they slow down first. ### Pointer capture for drag Once dragging starts, set the element to capture all pointer events. This ensures dragging continues even if the pointer leaves the element bounds. ### Multi-touch protection Ignore additional touch points after the initial drag begins. Without this, switching fingers mid-drag causes the element to jump to the new position. ```js function onPress() { if (isDragging) return; // Start drag... } ``` ### Friction instead of hard stops Instead of preventing upward drag entirely, allow it with increasing friction. It feels more natural than hitting an invisible wall. ## Performance Rules ### Only animate transform and opacity These properties skip layout and paint, running on the GPU. Animating `padding`, `margin`, `height`, or `width` triggers all three rendering steps. ### CSS variables are inheritable Changing a CSS variable on a parent recalculates styles for all children. In a drawer with many items, updating `--swipe-amount` on the container causes expensive style recalculation. Update `transform` directly on the element instead. ```js // Bad: triggers recalc on all children element.style.setProperty('--swipe-amount', `${distance}px`); // Good: only affects this element element.style.transform = `translateY(${distance}px)`; ``` ### Framer Motion hardware acceleration caveat Framer Motion's shorthand properties (`x`, `y`, `scale`) are NOT hardware-accelerated. They use `requestAnimationFrame` on the main thread. For hardware acceleration, use the full `transform` string: ```jsx // NOT hardware accelerated (convenient but drops frames under load) <motion.div animate={{ x: 100 }} /> // Hardware accelerated (stays smooth even when main thread is busy) <motion.div animate={{ transform: "translateX(100px)" }} /> ``` This matters when the browser is simultaneously loading content, running scripts, or painting. At Vercel, the dashboard tab animation used Shared Layout Animations and dropped frames during page loads. 
Switching to CSS animations (off main thread) fixed it. ### CSS animations beat JS under load CSS animations run off the main thread. When the browser is busy loading a new page, Framer Motion animations (using `requestAnimationFrame`) drop frames. CSS animations remain smooth. Use CSS for predetermined animations; JS for dynamic, interruptible ones. ### Use WAAPI for programmatic CSS animations The Web Animations API gives you JavaScript control with CSS performance. Hardware-accelerated, interruptible, and no library needed. ```js element.animate([{ clipPath: 'inset(0 0 100% 0)' }, { clipPath: 'inset(0 0 0 0)' }], { duration: 1000, fill: 'forwards', easing: 'cubic-bezier(0.77, 0, 0.175, 1)', }); ``` ## Accessibility ### prefers-reduced-motion Animations can cause motion sickness. Reduced motion means fewer and gentler animations, not zero. Keep opacity and color transitions that aid comprehension. Remove movement and position animations. ```css @media (prefers-reduced-motion: reduce) { .element { animation: fade 0.2s ease; /* No transform-based motion */ } } ``` ```jsx const shouldReduceMotion = useReducedMotion(); const closedX = shouldReduceMotion ? 0 : '-100%'; ``` ### Touch device hover states ```css @media (hover: hover) and (pointer: fine) { .element:hover { transform: scale(1.05); } } ``` Touch devices trigger hover on tap, causing false positives. Gate hover animations behind this media query. ## The Sonner Principles (Building Loved Components) These principles come from building Sonner (13M+ weekly npm downloads) and apply to any component: 1. **Developer experience is key.** No hooks, no context, no complex setup. Insert `<Toaster />` once, call `toast()` from anywhere. The less friction to adopt, the more people will use it. 2. **Good defaults matter more than options.** Ship beautiful out of the box. Most users never customize. The default easing, timing, and visual design should be excellent. 3. 
**Naming creates identity.** "Sonner" (French for "to ring") feels more elegant than "react-toast". Sacrifice discoverability for memorability when appropriate. 4. **Handle edge cases invisibly.** Pause toast timers when the tab is hidden. Fill gaps between stacked toasts with pseudo-elements to maintain hover state. Capture pointer events during drag. Users never notice these, and that is exactly right. 5. **Use transitions, not keyframes, for dynamic UI.** Toasts are added rapidly. Keyframes restart from zero on interruption. Transitions retarget smoothly. 6. **Build a great documentation site.** Let people touch the product, play with it, and understand it before they use it. Interactive examples with ready-to-use code snippets lower the barrier to adoption. ### Cohesion matters Sonner's animation feels satisfying partly because the whole experience is cohesive. The easing and duration fit the vibe of the library. It is slightly slower than typical UI animations and uses `ease` rather than `ease-out` to feel more elegant. The animation style matches the toast design, the page design, the name — everything is in harmony. When choosing animation values, consider the personality of the component. A playful component can be bouncier. A professional dashboard should be crisp and fast. Match the motion to the mood. ### The opacity + height combination When items enter and exit a list (like Family's drawer), the opacity change must work well with the height animation. This is often trial and error. There is no formula — you adjust until it feels right. ### Review your work the next day Review animations with fresh eyes. You notice imperfections the next day that you missed during development. Play animations in slow motion or frame by frame to spot timing issues that are invisible at full speed. ### Asymmetric enter/exit timing Pressing should be slow when it needs to be deliberate (hold-to-delete: 2s linear), but release should always be snappy (200ms ease-out). 
This pattern applies broadly: slow where the user is deciding, fast where the system is responding. ```css /* Release: fast */ .overlay { transition: clip-path 200ms ease-out; } /* Press: slow and deliberate */ .button:active .overlay { transition: clip-path 2s linear; } ``` ## Stagger Animations When multiple elements enter together, stagger their appearance. Each element animates in with a small delay after the previous one. This creates a cascading effect that feels more natural than everything appearing at once. ```css .item { opacity: 0; transform: translateY(8px); animation: fadeIn 300ms ease-out forwards; } .item:nth-child(1) { animation-delay: 0ms; } .item:nth-child(2) { animation-delay: 50ms; } .item:nth-child(3) { animation-delay: 100ms; } .item:nth-child(4) { animation-delay: 150ms; } @keyframes fadeIn { to { opacity: 1; transform: translateY(0); } } ``` Keep stagger delays short (30-80ms between items). Long delays make the interface feel slow. Stagger is decorative — never block interaction while stagger animations are playing. ## Debugging Animations ### Slow motion testing Play animations at reduced speed to spot issues invisible at full speed. Temporarily increase duration to 2-5x normal, or use browser DevTools animation inspector to slow playback. Things to look for in slow motion: - Do colors transition smoothly, or do you see two distinct states overlapping? - Does the easing feel right, or does it start/stop abruptly? - Is the transform-origin correct, or does the element scale from the wrong point? - Are multiple animated properties (opacity, transform, color) in sync? ### Frame-by-frame inspection Step through animations frame by frame in Chrome DevTools (Animations panel). This reveals timing issues between coordinated properties that you cannot see at full speed. ### Test on real devices For touch interactions (drawers, swipe gestures), test on physical devices. 
Connect your phone via USB, visit your local dev server by IP address, and use Safari's remote devtools. The Xcode Simulator is an alternative but real hardware is better for gesture testing. ## Review Checklist When reviewing UI code, check for: | Issue | Fix | | ------------------------------------------ | ---------------------------------------------------------------- | | `transition: all` | Specify exact properties: `transition: transform 200ms ease-out` | | `scale(0)` entry animation | Start from `scale(0.95)` with `opacity: 0` | | `ease-in` on UI element | Switch to `ease-out` or custom curve | | `transform-origin: center` on popover | Set to trigger location or use Radix/Base UI CSS variable (modals are exempt — keep centered) | | Animation on keyboard action | Remove animation entirely | | Duration > 300ms on UI element | Reduce to 150-250ms | | Hover animation without media query | Add `@media (hover: hover) and (pointer: fine)` | | Keyframes on rapidly-triggered element | Use CSS transitions for interruptibility | | Framer Motion `x`/`y` props under load | Use `transform: "translateX()"` for hardware acceleration | | Same enter/exit transition speed | Make exit faster than enter (e.g., enter 2s, exit 200ms) | | Elements all appear at once | Add stagger delay (30-80ms between items) | ================================================ FILE: agents/skills/frontend-design/LICENSE.txt ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
END OF TERMS AND CONDITIONS ================================================ FILE: agents/skills/frontend-design/SKILL.md ================================================ --- name: frontend-design description: "Design and implement distinctive, production-ready frontend interfaces with strong aesthetic direction. Use when asked to create or restyle web pages, components, or applications (HTML/CSS/JS, React, Vue, etc.)." --- # Frontend Design Skill Design and implement memorable frontend interfaces with a clear, intentional aesthetic. The output must be real, working code — not just mood boards. This skill is about **design thinking + execution**: every visual choice should be rooted in purpose and context. ## When to Use Use this skill when the user wants to: - Create a new web page, landing page, dashboard, or app UI - Design or redesign frontend components or screens - Improve typography, layout, color, motion, or overall visual polish - Convert a concept or brief into a high‑fidelity, coded interface ## Inputs to Gather (or Assume) Before coding, identify: - **Purpose & audience**: What problem does this UI solve? Who uses it? - **Brand/voice**: Any reference brands, tone, or visual inspiration? - **Technical constraints**: Framework, library, CSS strategy, accessibility, performance - **Content constraints**: Required copy, assets, data, features If the user did not provide this, ask **2–4 targeted questions**, or state reasonable assumptions in a short preface. ## Design Thinking (Required) Commit to a **single, bold aesthetic direction**. Name it and execute it consistently. Examples: - Brutalist / raw / utilitarian - Editorial / magazine / typographic - Luxury / refined / minimal - Retro‑futuristic / cyber / neon - Art‑deco / geometric / ornamental - Handcrafted / organic / textured **Avoid generic AI aesthetics.** No “default” fonts, color schemes, or stock layouts. Before writing code, define the system: 1. 
**Visual direction** — one sentence that describes the vibe 2. **Differentiator** — what should be memorable about this UI? 3. **Typography system** — display + body fonts, scale, weight, casing 4. **Color system** — dominant, accent, neutral; define as CSS variables 5. **Layout strategy** — grid rhythm, spacing scale, hierarchy plan 6. **Motion strategy** — 1–2 meaningful interaction moments If the user wants code only, skip the explanation but still follow this internally. ## Implementation Principles - **Working code**: HTML/CSS/JS or framework code that runs as‑is - **Semantic & accessible**: headings, labels, focus states, keyboard nav - **Responsive**: fluid layouts, breakpoints, responsive typography - **Tokenized styling**: CSS variables for colors, spacing, radii, shadows - **Modern layout**: prefer CSS Grid/Flex, avoid brittle positioning hacks ## Aesthetic Guidelines ### Typography - Typography should define the voice of the design - Avoid default fonts (Inter, Roboto, Arial, system stacks) - Use a **distinct display font** + a **refined body font** - Implement a clear hierarchy (size, weight, spacing, casing) ### Color & Theme - Commit to a palette with a strong point‑of‑view - Avoid timid, overused gradients (e.g., purple‑to‑pink on white) - Use contrast intentionally and check legibility ### Composition & Layout - Encourage asymmetry, scale contrast, overlap, or grid breaks - Use negative space deliberately (or controlled density if maximalist) - Create visual rhythm and hierarchy through spacing and alignment ### Detail & Atmosphere - Add texture or depth when appropriate (noise, grain, subtle patterns) - Use shadows/glows only when they serve the concept - Consider unique borders, masks, or clip‑paths for distinct shapes ### Motion & Interaction - Use motion sparingly but meaningfully - Favor one standout interaction over many tiny ones - Honor `prefers-reduced-motion` ## Avoid - Cookie‑cutter hero + 3 card layouts - Generic gradients and default 
font choices - Unmotivated decorative elements - Overly flat, characterless component libraries ## Deliverables - Provide full code with file names or component boundaries - Make customization easy with CSS variables or config objects - If assets are needed, provide inline SVGs or generative CSS patterns ## Quality Checklist (Self‑validate) - Aesthetic direction is unmistakable - Typography feels intentional and expressive - Layout and spacing are consistent and purposeful - Color palette feels cohesive and legible - Interactions enhance the experience without clutter - Code runs as provided and is production‑ready **Remember:** a design is only as strong as its commitment. Choose a direction and execute it relentlessly. ================================================ FILE: agents/skills/grill-me/SKILL.md ================================================ --- name: grill-me description: Interview the user relentlessly about a plan or design until reaching shared understanding, resolving each branch of the decision tree. Use when user wants to stress-test a plan, get grilled on their design, or mentions "grill me". --- Interview me relentlessly about every aspect of this plan until we reach a shared understanding. Walk down each branch of the design tree, resolving dependencies between decisions one-by-one. For each question, provide your recommended answer. Ask the questions one at a time. If a question can be answered by exploring the codebase, explore the codebase instead. ================================================ FILE: agents/skills/initz/SKILL.md ================================================ --- name: initz version: 1.0.0 description: Initialize or improve AGENTS.md --- # AGENTS.md Initialization Analyze this codebase and create an AGENTS.md file containing: 1. Build/lint/test commands - especially for running a single test 2. Code style guidelines including imports, formatting, types, naming conventions, error handling, etc. 
## Guidelines - Target length: ~150 lines - Include Cursor rules (from `.cursor/rules/` or `.cursorrules`) if present - Include Copilot rules (from `.github/copilot-instructions.md`) if present - If there's already an AGENTS.md at `${path}`, improve it ## Progressive Disclosure Approach 1. **Find contradictions**: Identify conflicting instructions and ask which to keep 2. **Identify the essentials**: Extract what belongs in root AGENTS.md: - One-sentence project description - Package manager (if not npm) - Non-standard build/typecheck commands - Anything truly relevant to every single task 3. **Group the rest**: Organize remaining instructions into logical categories (TypeScript conventions, testing patterns, API design, Git workflow) 4. **Create the file structure**: - Minimal root AGENTS.md with markdown links to separate files - Each separate file with relevant instructions - Suggested docs/ folder structure 5. **Flag for deletion**: Identify redundant, vague, or overly obvious instructions $ARGUMENTS ================================================ FILE: agents/skills/jj/SKILL.md ================================================ --- name: jj description: Uses the jj (Jujutsu) version control system. Use when asked about jj commands, git push/fetch workflow, or rebasing onto main for non git repo --- # jj (Jujutsu) Workflow Jujutsu is a Git-compatible VCS. This documents the user's workflow. 
## Aliases - jj tug: Move closest bookmark to @- (Advances bookmark to parent of working copy) - jj retrunk: Rebase onto trunk() (Rebases current branch onto latest main/master) - jj lg: Log recent 10 (Shows all revisions, limit 10) - jj compare: Compare working copy with parent (Shows changes between working copy and parent) ## Key Concepts - `@` refers to the working copy commit - `@-` refers to the parent of working copy - `trunk()` finds the most recent main/master/trunk on remote - `closest_bookmark(@-)` finds the nearest bookmark ancestor ## Conflict Resolution When conflicts occur after `jj retrunk`: 1. `jj status` shows conflicted files 2. Edit files to resolve conflicts 3. `jj squash` or continue working - jj auto-tracks changes ================================================ FILE: agents/skills/pi-improvements/SKILL.md ================================================ --- name: pi-improvements description: When encountering pi tool limitations, propose focused extensions that solve one problem each. --- ## Rules - When edit fails due to whitespace/indentation, propose line-based editing - When file changes between read and write are detected, propose stale-file guarding - When bash output is noisy, propose structured output parsing - When edits apply blindly, propose preview diffs - Each extension must solve exactly one problem - Prefer `~/.pi/extensions/name.ts` over folders ## Extension Patterns **Edit by Line**: Replace exact-text matching with line numbers ```typescript edit_line({ path: "foo.tsx", start: 45, end: 52, replacement: "..." 
}) ``` **Edit Preview**: Intercept `edit` tool, show unified diff in TUI overlay **Bash Summarize**: Parse known tools (vp check, npm test), return structured errors **File Stale Guard**: Store mtime on `read`, warn on `write` if changed ## When to Propose - After 2+ failed edit attempts due to formatting - When user manually pipes bash output through grep/head - When user discovers wrong edit only after running checks - When formatter modifies files between read and write ================================================ FILE: agents/skills/react-best-practices/AGENTS.md ================================================ # React Best Practices **Version 1.0.0** Vercel Engineering January 2026 > **Note:** > This document is mainly for agents and LLMs to follow when maintaining, > generating, or refactoring React and Next.js codebases at Vercel. Humans > may also find it useful, but guidance here is optimized for automation > and consistency by AI-assisted workflows. --- ## Abstract Comprehensive performance optimization guide for React and Next.js applications, designed for AI agents and LLMs. Contains 40+ rules across 8 categories, prioritized by impact from critical (eliminating waterfalls, reducing bundle size) to incremental (advanced patterns). Each rule includes detailed explanations, real-world examples comparing incorrect vs. correct implementations, and specific impact metrics to guide automated refactoring and code generation. --- ## Table of Contents 1. [Eliminating Waterfalls](#1-eliminating-waterfalls) — **CRITICAL** - 1.1 [Defer Await Until Needed](#11-defer-await-until-needed) - 1.2 [Dependency-Based Parallelization](#12-dependency-based-parallelization) - 1.3 [Prevent Waterfall Chains in API Routes](#13-prevent-waterfall-chains-in-api-routes) - 1.4 [Promise.all() for Independent Operations](#14-promiseall-for-independent-operations) - 1.5 [Strategic Suspense Boundaries](#15-strategic-suspense-boundaries) 2. 
[Bundle Size Optimization](#2-bundle-size-optimization) — **CRITICAL** - 2.1 [Avoid Barrel File Imports](#21-avoid-barrel-file-imports) - 2.2 [Conditional Module Loading](#22-conditional-module-loading) - 2.3 [Defer Non-Critical Third-Party Libraries](#23-defer-non-critical-third-party-libraries) - 2.4 [Dynamic Imports for Heavy Components](#24-dynamic-imports-for-heavy-components) - 2.5 [Preload Based on User Intent](#25-preload-based-on-user-intent) 3. [Server-Side Performance](#3-server-side-performance) — **HIGH** - 3.1 [Authenticate Server Actions Like API Routes](#31-authenticate-server-actions-like-api-routes) - 3.2 [Avoid Duplicate Serialization in RSC Props](#32-avoid-duplicate-serialization-in-rsc-props) - 3.3 [Cross-Request LRU Caching](#33-cross-request-lru-caching) - 3.4 [Minimize Serialization at RSC Boundaries](#34-minimize-serialization-at-rsc-boundaries) - 3.5 [Parallel Data Fetching with Component Composition](#35-parallel-data-fetching-with-component-composition) - 3.6 [Per-Request Deduplication with React.cache()](#36-per-request-deduplication-with-reactcache) - 3.7 [Use after() for Non-Blocking Operations](#37-use-after-for-non-blocking-operations) 4. [Client-Side Data Fetching](#4-client-side-data-fetching) — **MEDIUM-HIGH** - 4.1 [Deduplicate Global Event Listeners](#41-deduplicate-global-event-listeners) - 4.2 [Use Passive Event Listeners for Scrolling Performance](#42-use-passive-event-listeners-for-scrolling-performance) - 4.3 [Use SWR for Automatic Deduplication](#43-use-swr-for-automatic-deduplication) - 4.4 [Version and Minimize localStorage Data](#44-version-and-minimize-localstorage-data) 5. 
[Re-render Optimization](#5-re-render-optimization) — **MEDIUM** - 5.1 [Defer State Reads to Usage Point](#51-defer-state-reads-to-usage-point) - 5.2 [Do not wrap a simple expression with a primitive result type in useMemo](#52-do-not-wrap-a-simple-expression-with-a-primitive-result-type-in-usememo) - 5.3 [Extract Default Non-primitive Parameter Value from Memoized Component to Constant](#53-extract-default-non-primitive-parameter-value-from-memoized-component-to-constant) - 5.4 [Extract to Memoized Components](#54-extract-to-memoized-components) - 5.5 [Narrow Effect Dependencies](#55-narrow-effect-dependencies) - 5.6 [Subscribe to Derived State](#56-subscribe-to-derived-state) - 5.7 [Use Functional setState Updates](#57-use-functional-setstate-updates) - 5.8 [Use Lazy State Initialization](#58-use-lazy-state-initialization) - 5.9 [Use Transitions for Non-Urgent Updates](#59-use-transitions-for-non-urgent-updates) 6. [Rendering Performance](#6-rendering-performance) — **MEDIUM** - 6.1 [Animate SVG Wrapper Instead of SVG Element](#61-animate-svg-wrapper-instead-of-svg-element) - 6.2 [CSS content-visibility for Long Lists](#62-css-content-visibility-for-long-lists) - 6.3 [Hoist Static JSX Elements](#63-hoist-static-jsx-elements) - 6.4 [Optimize SVG Precision](#64-optimize-svg-precision) - 6.5 [Prevent Hydration Mismatch Without Flickering](#65-prevent-hydration-mismatch-without-flickering) - 6.6 [Use Activity Component for Show/Hide](#66-use-activity-component-for-showhide) - 6.7 [Use Explicit Conditional Rendering](#67-use-explicit-conditional-rendering) - 6.8 [Use useTransition Over Manual Loading States](#68-use-usetransition-over-manual-loading-states) 7. 
[JavaScript Performance](#7-javascript-performance) — **LOW-MEDIUM** - 7.1 [Avoid Layout Thrashing](#71-avoid-layout-thrashing) - 7.2 [Build Index Maps for Repeated Lookups](#72-build-index-maps-for-repeated-lookups) - 7.3 [Cache Property Access in Loops](#73-cache-property-access-in-loops) - 7.4 [Cache Repeated Function Calls](#74-cache-repeated-function-calls) - 7.5 [Cache Storage API Calls](#75-cache-storage-api-calls) - 7.6 [Combine Multiple Array Iterations](#76-combine-multiple-array-iterations) - 7.7 [Early Length Check for Array Comparisons](#77-early-length-check-for-array-comparisons) - 7.8 [Early Return from Functions](#78-early-return-from-functions) - 7.9 [Hoist RegExp Creation](#79-hoist-regexp-creation) - 7.10 [Use Loop for Min/Max Instead of Sort](#710-use-loop-for-minmax-instead-of-sort) - 7.11 [Use Set/Map for O(1) Lookups](#711-use-setmap-for-o1-lookups) - 7.12 [Use toSorted() Instead of sort() for Immutability](#712-use-tosorted-instead-of-sort-for-immutability) 8. [Advanced Patterns](#8-advanced-patterns) — **LOW** - 8.1 [Store Event Handlers in Refs](#81-store-event-handlers-in-refs) - 8.2 [useEffectEvent for Stable Callback Refs](#82-useeffectevent-for-stable-callback-refs) --- ## 1. Eliminating Waterfalls **Impact: CRITICAL** Waterfalls are the #1 performance killer. Each sequential await adds full network latency. Eliminating them yields the largest gains. ### 1.1 Defer Await Until Needed **Impact: HIGH (avoids blocking unused code paths)** Move `await` operations into the branches where they're actually used to avoid blocking code paths that don't need them. 
**Incorrect: blocks both branches** ```typescript async function handleRequest(userId: string, skipProcessing: boolean) { const userData = await fetchUserData(userId) if (skipProcessing) { // Returns immediately but still waited for userData return { skipped: true } } // Only this branch uses userData return processUserData(userData) } ``` **Correct: only blocks when needed** ```typescript async function handleRequest(userId: string, skipProcessing: boolean) { if (skipProcessing) { // Returns immediately without waiting return { skipped: true } } // Fetch only when needed const userData = await fetchUserData(userId) return processUserData(userData) } ``` **Another example: early return optimization** ```typescript // Incorrect: always fetches permissions async function updateResource(resourceId: string, userId: string) { const permissions = await fetchPermissions(userId) const resource = await getResource(resourceId) if (!resource) { return { error: 'Not found' } } if (!permissions.canEdit) { return { error: 'Forbidden' } } return await updateResourceData(resource, permissions) } // Correct: fetches only when needed async function updateResource(resourceId: string, userId: string) { const resource = await getResource(resourceId) if (!resource) { return { error: 'Not found' } } const permissions = await fetchPermissions(userId) if (!permissions.canEdit) { return { error: 'Forbidden' } } return await updateResourceData(resource, permissions) } ``` This optimization is especially valuable when the skipped branch is frequently taken, or when the deferred operation is expensive. ### 1.2 Dependency-Based Parallelization **Impact: CRITICAL (2-10× improvement)** For operations with partial dependencies, use `better-all` to maximize parallelism. It automatically starts each task at the earliest possible moment. 
**Incorrect: profile waits for config unnecessarily** ```typescript const [user, config] = await Promise.all([ fetchUser(), fetchConfig() ]) const profile = await fetchProfile(user.id) ``` **Correct: config and profile run in parallel** ```typescript import { all } from 'better-all' const { user, config, profile } = await all({ async user() { return fetchUser() }, async config() { return fetchConfig() }, async profile() { return fetchProfile((await this.$.user).id) } }) ``` **Alternative without extra dependencies:** ```typescript const userPromise = fetchUser() const profilePromise = userPromise.then(user => fetchProfile(user.id)) const [user, config, profile] = await Promise.all([ userPromise, fetchConfig(), profilePromise ]) ``` We can also create all the promises first, and do `Promise.all()` at the end. Reference: [https://github.com/shuding/better-all](https://github.com/shuding/better-all) ### 1.3 Prevent Waterfall Chains in API Routes **Impact: CRITICAL (2-10× improvement)** In API routes and Server Actions, start independent operations immediately, even if you don't await them yet. **Incorrect: config waits for auth, data waits for both** ```typescript export async function GET(request: Request) { const session = await auth() const config = await fetchConfig() const data = await fetchData(session.user.id) return Response.json({ data, config }) } ``` **Correct: auth and config start immediately** ```typescript export async function GET(request: Request) { const sessionPromise = auth() const configPromise = fetchConfig() const session = await sessionPromise const [config, data] = await Promise.all([ configPromise, fetchData(session.user.id) ]) return Response.json({ data, config }) } ``` For operations with more complex dependency chains, use `better-all` to automatically maximize parallelism (see Dependency-Based Parallelization). 
### 1.4 Promise.all() for Independent Operations **Impact: CRITICAL (2-10× improvement)** When async operations have no interdependencies, execute them concurrently using `Promise.all()`. **Incorrect: sequential execution, 3 round trips** ```typescript const user = await fetchUser() const posts = await fetchPosts() const comments = await fetchComments() ``` **Correct: parallel execution, 1 round trip** ```typescript const [user, posts, comments] = await Promise.all([ fetchUser(), fetchPosts(), fetchComments() ]) ``` ### 1.5 Strategic Suspense Boundaries **Impact: HIGH (faster initial paint)** Instead of awaiting data in async components before returning JSX, use Suspense boundaries to show the wrapper UI faster while data loads. **Incorrect: wrapper blocked by data fetching** ```tsx async function Page() { const data = await fetchData() // Blocks entire page return (
Sidebar
Header
Footer
) } ``` The entire layout waits for data even though only the middle section needs it. **Correct: wrapper shows immediately, data streams in** ```tsx function Page() { return (
Sidebar
Header
}>
Footer
) } async function DataDisplay() { const data = await fetchData() // Only blocks this component return
{data.content}
} ``` Sidebar, Header, and Footer render immediately. Only DataDisplay waits for data. **Alternative: share promise across components** ```tsx function Page() { // Start fetch immediately, but don't await const dataPromise = fetchData() return (
Sidebar
Header
}>
Footer
) } function DataDisplay({ dataPromise }: { dataPromise: Promise<Data> }) { const data = use(dataPromise) // Unwraps the promise return
{data.content}
} function DataSummary({ dataPromise }: { dataPromise: Promise<Data> }) { const data = use(dataPromise) // Reuses the same promise return
{data.summary}
} ``` Both components share the same promise, so only one fetch occurs. Layout renders immediately while both components wait together. **When NOT to use this pattern:** - Critical data needed for layout decisions (affects positioning) - SEO-critical content above the fold - Small, fast queries where suspense overhead isn't worth it - When you want to avoid layout shift (loading → content jump) **Trade-off:** Faster initial paint vs potential layout shift. Choose based on your UX priorities. --- ## 2. Bundle Size Optimization **Impact: CRITICAL** Reducing initial bundle size improves Time to Interactive and Largest Contentful Paint. ### 2.1 Avoid Barrel File Imports **Impact: CRITICAL (200-800ms import cost, slow builds)** Import directly from source files instead of barrel files to avoid loading thousands of unused modules. **Barrel files** are entry points that re-export multiple modules (e.g., `index.js` that does `export * from './module'`). Popular icon and component libraries can have **up to 10,000 re-exports** in their entry file. For many React packages, **it takes 200-800ms just to import them**, affecting both development speed and production cold starts. **Why tree-shaking doesn't help:** When a library is marked as external (not bundled), the bundler can't optimize it. If you bundle it to enable tree-shaking, builds become substantially slower analyzing the entire module graph. 
**Incorrect: imports entire library** ```tsx import { Check, X, Menu } from 'lucide-react' // Loads 1,583 modules, takes ~2.8s extra in dev // Runtime cost: 200-800ms on every cold start import { Button, TextField } from '@mui/material' // Loads 2,225 modules, takes ~4.2s extra in dev ``` **Correct: imports only what you need** ```tsx import Check from 'lucide-react/dist/esm/icons/check' import X from 'lucide-react/dist/esm/icons/x' import Menu from 'lucide-react/dist/esm/icons/menu' // Loads only 3 modules (~2KB vs ~1MB) import Button from '@mui/material/Button' import TextField from '@mui/material/TextField' // Loads only what you use ``` **Alternative: Next.js 13.5+** ```js // next.config.js - use optimizePackageImports module.exports = { experimental: { optimizePackageImports: ['lucide-react', '@mui/material'] } } // Then you can keep the ergonomic barrel imports: import { Check, X, Menu } from 'lucide-react' // Automatically transformed to direct imports at build time ``` Direct imports provide 15-70% faster dev boot, 28% faster builds, 40% faster cold starts, and significantly faster HMR. Libraries commonly affected: `lucide-react`, `@mui/material`, `@mui/icons-material`, `@tabler/icons-react`, `react-icons`, `@headlessui/react`, `@radix-ui/react-*`, `lodash`, `ramda`, `date-fns`, `rxjs`, `react-use`. Reference: [https://vercel.com/blog/how-we-optimized-package-imports-in-next-js](https://vercel.com/blog/how-we-optimized-package-imports-in-next-js) ### 2.2 Conditional Module Loading **Impact: HIGH (loads large data only when needed)** Load large data or modules only when a feature is activated. 
**Example: lazy-load animation frames** ```tsx function AnimationPlayer({ enabled, setEnabled }: { enabled: boolean; setEnabled: React.Dispatch> }) { const [frames, setFrames] = useState(null) useEffect(() => { if (enabled && !frames && typeof window !== 'undefined') { import('./animation-frames.js') .then(mod => setFrames(mod.frames)) .catch(() => setEnabled(false)) } }, [enabled, frames, setEnabled]) if (!frames) return return } ``` The `typeof window !== 'undefined'` check prevents bundling this module for SSR, optimizing server bundle size and build speed. ### 2.3 Defer Non-Critical Third-Party Libraries **Impact: MEDIUM (loads after hydration)** Analytics, logging, and error tracking don't block user interaction. Load them after hydration. **Incorrect: blocks initial bundle** ```tsx import { Analytics } from '@vercel/analytics/react' export default function RootLayout({ children }) { return ( {children} ) } ``` **Correct: loads after hydration** ```tsx import dynamic from 'next/dynamic' const Analytics = dynamic( () => import('@vercel/analytics/react').then(m => m.Analytics), { ssr: false } ) export default function RootLayout({ children }) { return ( {children} ) } ``` ### 2.4 Dynamic Imports for Heavy Components **Impact: CRITICAL (directly affects TTI and LCP)** Use `next/dynamic` to lazy-load large components not needed on initial render. **Incorrect: Monaco bundles with main chunk ~300KB** ```tsx import { MonacoEditor } from './monaco-editor' function CodePanel({ code }: { code: string }) { return } ``` **Correct: Monaco loads on demand** ```tsx import dynamic from 'next/dynamic' const MonacoEditor = dynamic( () => import('./monaco-editor').then(m => m.MonacoEditor), { ssr: false } ) function CodePanel({ code }: { code: string }) { return } ``` ### 2.5 Preload Based on User Intent **Impact: MEDIUM (reduces perceived latency)** Preload heavy bundles before they're needed to reduce perceived latency. 
**Example: preload on hover/focus** ```tsx function EditorButton({ onClick }: { onClick: () => void }) { const preload = () => { if (typeof window !== 'undefined') { void import('./monaco-editor') } } return ( ) } ``` **Example: preload when feature flag is enabled** ```tsx function FlagsProvider({ children, flags }: Props) { useEffect(() => { if (flags.editorEnabled && typeof window !== 'undefined') { void import('./monaco-editor').then(mod => mod.init()) } }, [flags.editorEnabled]) return {children} } ``` The `typeof window !== 'undefined'` check prevents bundling preloaded modules for SSR, optimizing server bundle size and build speed. --- ## 3. Server-Side Performance **Impact: HIGH** Optimizing server-side rendering and data fetching eliminates server-side waterfalls and reduces response times. ### 3.1 Authenticate Server Actions Like API Routes **Impact: CRITICAL (prevents unauthorized access to server mutations)** Server Actions (functions with `"use server"`) are exposed as public endpoints, just like API routes. Always verify authentication and authorization **inside** each Server Action—do not rely solely on middleware, layout guards, or page-level checks, as Server Actions can be invoked directly. Next.js documentation explicitly states: "Treat Server Actions with the same security considerations as public-facing API endpoints, and verify if the user is allowed to perform a mutation." **Incorrect: no authentication check** ```typescript 'use server' export async function deleteUser(userId: string) { // Anyone can call this! 
No auth check await db.user.delete({ where: { id: userId } }) return { success: true } } ``` **Correct: authentication inside the action** ```typescript 'use server' import { verifySession } from '@/lib/auth' import { unauthorized } from '@/lib/errors' export async function deleteUser(userId: string) { // Always check auth inside the action const session = await verifySession() if (!session) { throw unauthorized('Must be logged in') } // Check authorization too if (session.user.role !== 'admin' && session.user.id !== userId) { throw unauthorized('Cannot delete other users') } await db.user.delete({ where: { id: userId } }) return { success: true } } ``` **With input validation:** ```typescript 'use server' import { verifySession } from '@/lib/auth' import { z } from 'zod' const updateProfileSchema = z.object({ userId: z.string().uuid(), name: z.string().min(1).max(100), email: z.string().email() }) export async function updateProfile(data: unknown) { // Validate input first const validated = updateProfileSchema.parse(data) // Then authenticate const session = await verifySession() if (!session) { throw new Error('Unauthorized') } // Then authorize if (session.user.id !== validated.userId) { throw new Error('Can only update own profile') } // Finally perform the mutation await db.user.update({ where: { id: validated.userId }, data: { name: validated.name, email: validated.email } }) return { success: true } } ``` Reference: [https://nextjs.org/docs/app/guides/authentication](https://nextjs.org/docs/app/guides/authentication) ### 3.2 Avoid Duplicate Serialization in RSC Props **Impact: LOW (reduces network payload by avoiding duplicate serialization)** RSC→client serialization deduplicates by object reference, not value. Same reference = serialized once; new reference = serialized again. Do transformations (`.toSorted()`, `.filter()`, `.map()`) in client, not server. 
**Incorrect: duplicates array** ```tsx // RSC: sends 6 strings (2 arrays × 3 items) ``` **Correct: sends 3 strings** ```tsx // RSC: send once // Client: transform there 'use client' const sorted = useMemo(() => [...usernames].sort(), [usernames]) ``` **Nested deduplication behavior:** ```tsx // string[] - duplicates everything usernames={['a','b']} sorted={usernames.toSorted()} // sends 4 strings // object[] - duplicates array structure only users={[{id:1},{id:2}]} sorted={users.toSorted()} // sends 2 arrays + 2 unique objects (not 4) ``` Deduplication works recursively. Impact varies by data type: - `string[]`, `number[]`, `boolean[]`: **HIGH impact** - array + all primitives fully duplicated - `object[]`: **LOW impact** - array duplicated, but nested objects deduplicated by reference **Operations breaking deduplication: create new references** - Arrays: `.toSorted()`, `.filter()`, `.map()`, `.slice()`, `[...arr]` - Objects: `{...obj}`, `Object.assign()`, `structuredClone()`, `JSON.parse(JSON.stringify())` **More examples:** ```tsx // ❌ Bad u.active)} /> // ✅ Good // Do filtering/destructuring in client ``` **Exception:** Pass derived data when transformation is expensive or client doesn't need original. ### 3.3 Cross-Request LRU Caching **Impact: HIGH (caches across requests)** `React.cache()` only works within one request. For data shared across sequential requests (user clicks button A then button B), use an LRU cache. **Implementation:** ```typescript import { LRUCache } from 'lru-cache' const cache = new LRUCache({ max: 1000, ttl: 5 * 60 * 1000 // 5 minutes }) export async function getUser(id: string) { const cached = cache.get(id) if (cached) return cached const user = await db.user.findUnique({ where: { id } }) cache.set(id, user) return user } // Request 1: DB query, result cached // Request 2: cache hit, no DB query ``` Use when sequential user actions hit multiple endpoints needing the same data within seconds. 
**With Vercel's [Fluid Compute](https://vercel.com/docs/fluid-compute):** LRU caching is especially effective because multiple concurrent requests can share the same function instance and cache. This means the cache persists across requests without needing external storage like Redis. **In traditional serverless:** Each invocation runs in isolation, so consider Redis for cross-process caching. Reference: [https://github.com/isaacs/node-lru-cache](https://github.com/isaacs/node-lru-cache) ### 3.4 Minimize Serialization at RSC Boundaries **Impact: HIGH (reduces data transfer size)** The React Server/Client boundary serializes all object properties into strings and embeds them in the HTML response and subsequent RSC requests. This serialized data directly impacts page weight and load time, so **size matters a lot**. Only pass fields that the client actually uses. **Incorrect: serializes all 50 fields** ```tsx async function Page() { const user = await fetchUser() // 50 fields return } 'use client' function Profile({ user }: { user: User }) { return
{user.name}
// uses 1 field } ``` **Correct: serializes only 1 field** ```tsx async function Page() { const user = await fetchUser() return } 'use client' function Profile({ name }: { name: string }) { return
<div>{name}</div>
} ``` ### 3.5 Parallel Data Fetching with Component Composition **Impact: CRITICAL (eliminates server-side waterfalls)** React Server Components execute sequentially within a tree. Restructure with composition to parallelize data fetching. **Incorrect: Sidebar waits for Page's fetch to complete** ```tsx export default async function Page() { const header = await fetchHeader() return (
<div> {header} <Sidebar /> </div>
) } async function Sidebar() { const items = await fetchSidebarItems() return } ``` **Correct: both fetch simultaneously** ```tsx async function Header() { const data = await fetchHeader() return
<div>{data}</div>
} async function Sidebar() { const items = await fetchSidebarItems() return } export default function Page() { return (
<div> <Header /> <Sidebar /> </div> ) } ``` **Alternative with children prop:** ```tsx async function Header() { const data = await fetchHeader() return
<div>{data}</div>
} async function Sidebar() { const items = await fetchSidebarItems() return } function Layout({ children }: { children: ReactNode }) { return (
<div> <Sidebar /> {children} </div>
) } export default function Page() { return ( ) } ``` ### 3.6 Per-Request Deduplication with React.cache() **Impact: MEDIUM (deduplicates within request)** Use `React.cache()` for server-side request deduplication. Authentication and database queries benefit most. **Usage:** ```typescript import { cache } from 'react' export const getCurrentUser = cache(async () => { const session = await auth() if (!session?.user?.id) return null return await db.user.findUnique({ where: { id: session.user.id } }) }) ``` Within a single request, multiple calls to `getCurrentUser()` execute the query only once. **Avoid inline objects as arguments:** `React.cache()` uses shallow equality (`Object.is`) to determine cache hits. Inline objects create new references each call, preventing cache hits. **Incorrect: always cache miss** ```typescript const getUser = cache(async (params: { uid: number }) => { return await db.user.findUnique({ where: { id: params.uid } }) }) // Each call creates new object, never hits cache getUser({ uid: 1 }) getUser({ uid: 1 }) // Cache miss, runs query again ``` **Correct: cache hit** ```typescript const params = { uid: 1 } getUser(params) // Query runs getUser(params) // Cache hit (same reference) ``` If you must pass objects, pass the same reference: **Next.js-Specific Note:** In Next.js, the `fetch` API is automatically extended with request memoization. Requests with the same URL and options are automatically deduplicated within a single request, so you don't need `React.cache()` for `fetch` calls. However, `React.cache()` is still essential for other async tasks: - Database queries (Prisma, Drizzle, etc.) - Heavy computations - Authentication checks - File system operations - Any non-fetch async work Use `React.cache()` to deduplicate these operations across your component tree. 
Reference: [https://react.dev/reference/react/cache](https://react.dev/reference/react/cache) ### 3.7 Use after() for Non-Blocking Operations **Impact: MEDIUM (faster response times)** Use Next.js's `after()` to schedule work that should execute after a response is sent. This prevents logging, analytics, and other side effects from blocking the response. **Incorrect: blocks response** ```tsx import { logUserAction } from '@/app/utils' export async function POST(request: Request) { // Perform mutation await updateDatabase(request) // Logging blocks the response const userAgent = request.headers.get('user-agent') || 'unknown' await logUserAction({ userAgent }) return new Response(JSON.stringify({ status: 'success' }), { status: 200, headers: { 'Content-Type': 'application/json' } }) } ``` **Correct: non-blocking** ```tsx import { after } from 'next/server' import { headers, cookies } from 'next/headers' import { logUserAction } from '@/app/utils' export async function POST(request: Request) { // Perform mutation await updateDatabase(request) // Log after response is sent after(async () => { const userAgent = (await headers()).get('user-agent') || 'unknown' const sessionCookie = (await cookies()).get('session-id')?.value || 'anonymous' logUserAction({ sessionCookie, userAgent }) }) return new Response(JSON.stringify({ status: 'success' }), { status: 200, headers: { 'Content-Type': 'application/json' } }) } ``` The response is sent immediately while logging happens in the background. **Common use cases:** - Analytics tracking - Audit logging - Sending notifications - Cache invalidation - Cleanup tasks **Important notes:** - `after()` runs even if the response fails or redirects - Works in Server Actions, Route Handlers, and Server Components Reference: [https://nextjs.org/docs/app/api-reference/functions/after](https://nextjs.org/docs/app/api-reference/functions/after) --- ## 4. 
Client-Side Data Fetching **Impact: MEDIUM-HIGH** Automatic deduplication and efficient data fetching patterns reduce redundant network requests. ### 4.1 Deduplicate Global Event Listeners **Impact: LOW (single listener for N components)** Use `useSWRSubscription()` to share global event listeners across component instances. **Incorrect: N instances = N listeners** ```tsx function useKeyboardShortcut(key: string, callback: () => void) { useEffect(() => { const handler = (e: KeyboardEvent) => { if (e.metaKey && e.key === key) { callback() } } window.addEventListener('keydown', handler) return () => window.removeEventListener('keydown', handler) }, [key, callback]) } ``` When using the `useKeyboardShortcut` hook multiple times, each instance will register a new listener. **Correct: N instances = 1 listener** ```tsx import useSWRSubscription from 'swr/subscription' // Module-level Map to track callbacks per key const keyCallbacks = new Map<string, Set<() => void>>() function useKeyboardShortcut(key: string, callback: () => void) { // Register this callback in the Map useEffect(() => { if (!keyCallbacks.has(key)) { keyCallbacks.set(key, new Set()) } keyCallbacks.get(key)!.add(callback) return () => { const set = keyCallbacks.get(key) if (set) { set.delete(callback) if (set.size === 0) { keyCallbacks.delete(key) } } } }, [key, callback]) useSWRSubscription('global-keydown', () => { const handler = (e: KeyboardEvent) => { if (e.metaKey && keyCallbacks.has(e.key)) { keyCallbacks.get(e.key)!.forEach(cb => cb()) } } window.addEventListener('keydown', handler) return () => window.removeEventListener('keydown', handler) }) } function Profile() { // Multiple shortcuts will share the same listener useKeyboardShortcut('p', () => { /* ... */ }) useKeyboardShortcut('k', () => { /* ... */ }) // ... 
} ``` ### 4.2 Use Passive Event Listeners for Scrolling Performance **Impact: MEDIUM (eliminates scroll delay caused by event listeners)** Add `{ passive: true }` to touch and wheel event listeners to enable immediate scrolling. Browsers normally wait for listeners to finish to check if `preventDefault()` is called, causing scroll delay. **Incorrect:** ```typescript useEffect(() => { const handleTouch = (e: TouchEvent) => console.log(e.touches[0].clientX) const handleWheel = (e: WheelEvent) => console.log(e.deltaY) document.addEventListener('touchstart', handleTouch) document.addEventListener('wheel', handleWheel) return () => { document.removeEventListener('touchstart', handleTouch) document.removeEventListener('wheel', handleWheel) } }, []) ``` **Correct:** ```typescript useEffect(() => { const handleTouch = (e: TouchEvent) => console.log(e.touches[0].clientX) const handleWheel = (e: WheelEvent) => console.log(e.deltaY) document.addEventListener('touchstart', handleTouch, { passive: true }) document.addEventListener('wheel', handleWheel, { passive: true }) return () => { document.removeEventListener('touchstart', handleTouch) document.removeEventListener('wheel', handleWheel) } }, []) ``` **Use passive when:** tracking/analytics, logging, any listener that doesn't call `preventDefault()`. **Don't use passive when:** implementing custom swipe gestures, custom zoom controls, or any listener that needs `preventDefault()`. ### 4.3 Use SWR for Automatic Deduplication **Impact: MEDIUM-HIGH (automatic deduplication)** SWR enables request deduplication, caching, and revalidation across component instances. 
**Incorrect: no deduplication, each instance fetches** ```tsx function UserList() { const [users, setUsers] = useState([]) useEffect(() => { fetch('/api/users') .then(r => r.json()) .then(setUsers) }, []) } ``` **Correct: multiple instances share one request** ```tsx import useSWR from 'swr' function UserList() { const { data: users } = useSWR('/api/users', fetcher) } ``` **For immutable data:** ```tsx import useSWRImmutable from 'swr/immutable' function StaticContent() { const { data } = useSWRImmutable('/api/config', fetcher) } ``` **For mutations:** ```tsx import useSWRMutation from 'swr/mutation' function UpdateButton() { const { trigger } = useSWRMutation('/api/user', updateUser) return <button onClick={() => trigger()}>Update</button> } ``` Reference: [https://swr.vercel.app](https://swr.vercel.app) ### 4.4 Version and Minimize localStorage Data **Impact: MEDIUM (prevents schema conflicts, reduces storage size)** Add version prefix to keys and store only needed fields. Prevents schema conflicts and accidental storage of sensitive data. **Incorrect:** ```typescript // No version, stores everything, no error handling localStorage.setItem('userConfig', JSON.stringify(fullUserObject)) const data = localStorage.getItem('userConfig') ``` **Correct:** ```typescript const VERSION = 'v2' function saveConfig(config: { theme: string; language: string }) { try { localStorage.setItem(`userConfig:${VERSION}`, JSON.stringify(config)) } catch { // Throws in incognito/private browsing, quota exceeded, or disabled } } function loadConfig() { try { const data = localStorage.getItem(`userConfig:${VERSION}`) return data ? JSON.parse(data) : null } catch { return null } } // Migration from v1 to v2 function migrate() { try { const v1 = localStorage.getItem('userConfig:v1') if (v1) { const old = JSON.parse(v1) saveConfig({ theme: old.darkMode ? 
'dark' : 'light', language: old.lang }) localStorage.removeItem('userConfig:v1') } } catch {} } ``` **Store minimal fields from server responses:** ```typescript // User object has 20+ fields, only store what UI needs function cachePrefs(user: FullUser) { try { localStorage.setItem('prefs:v1', JSON.stringify({ theme: user.preferences.theme, notifications: user.preferences.notifications })) } catch {} } ``` **Always wrap in try-catch:** `getItem()` and `setItem()` throw in incognito/private browsing (Safari, Firefox), when quota exceeded, or when disabled. **Benefits:** Schema evolution via versioning, reduced storage size, prevents storing tokens/PII/internal flags. --- ## 5. Re-render Optimization **Impact: MEDIUM** Reducing unnecessary re-renders minimizes wasted computation and improves UI responsiveness. ### 5.1 Defer State Reads to Usage Point **Impact: MEDIUM (avoids unnecessary subscriptions)** Don't subscribe to dynamic state (searchParams, localStorage) if you only read it inside callbacks. **Incorrect: subscribes to all searchParams changes** ```tsx function ShareButton({ chatId }: { chatId: string }) { const searchParams = useSearchParams() const handleShare = () => { const ref = searchParams.get('ref') shareChat(chatId, { ref }) } return } ``` **Correct: reads on demand, no subscription** ```tsx function ShareButton({ chatId }: { chatId: string }) { const handleShare = () => { const params = new URLSearchParams(window.location.search) const ref = params.get('ref') shareChat(chatId, { ref }) } return } ``` ### 5.2 Do not wrap a simple expression with a primitive result type in useMemo **Impact: LOW-MEDIUM (wasted computation on every render)** When an expression is simple (few logical or arithmetical operators) and has a primitive result type (boolean, number, string), do not wrap it in `useMemo`. Calling `useMemo` and comparing hook dependencies may consume more resources than the expression itself. 
**Incorrect:** ```tsx function Header({ user, notifications }: Props) { const isLoading = useMemo(() => { return user.isLoading || notifications.isLoading }, [user.isLoading, notifications.isLoading]) if (isLoading) return // return some markup } ``` **Correct:** ```tsx function Header({ user, notifications }: Props) { const isLoading = user.isLoading || notifications.isLoading if (isLoading) return // return some markup } ``` ### 5.3 Extract Default Non-primitive Parameter Value from Memoized Component to Constant **Impact: MEDIUM (restores memoization by using a constant for default value)** When memoized component has a default value for some non-primitive optional parameter, such as an array, function, or object, calling the component without that parameter results in broken memoization. This is because new value instances are created on every rerender, and they do not pass strict equality comparison in `memo()`. To address this issue, extract the default value into a constant. **Incorrect: `onClick` has different values on every rerender** ```tsx const UserAvatar = memo(function UserAvatar({ onClick = () => {} }: { onClick?: () => void }) { // ... }) // Used without optional onClick ``` **Correct: stable default value** ```tsx const NOOP = () => {}; const UserAvatar = memo(function UserAvatar({ onClick = NOOP }: { onClick?: () => void }) { // ... }) // Used without optional onClick ``` ### 5.4 Extract to Memoized Components **Impact: MEDIUM (enables early returns)** Extract expensive work into memoized components to enable early returns before computation. **Incorrect: computes avatar even when loading** ```tsx function Profile({ user, loading }: Props) { const avatar = useMemo(() => { const id = computeAvatarId(user) return }, [user]) if (loading) return return
{avatar}
} ``` **Correct: skips computation when loading** ```tsx const UserAvatar = memo(function UserAvatar({ user }: { user: User }) { const id = useMemo(() => computeAvatarId(user), [user]) return }) function Profile({ user, loading }: Props) { if (loading) return return (
) } ``` **Note:** If your project has [React Compiler](https://react.dev/learn/react-compiler) enabled, manual memoization with `memo()` and `useMemo()` is not necessary. The compiler automatically optimizes re-renders. ### 5.5 Narrow Effect Dependencies **Impact: LOW (minimizes effect re-runs)** Specify primitive dependencies instead of objects to minimize effect re-runs. **Incorrect: re-runs on any user field change** ```tsx useEffect(() => { console.log(user.id) }, [user]) ``` **Correct: re-runs only when id changes** ```tsx useEffect(() => { console.log(user.id) }, [user.id]) ``` **For derived state, compute outside effect:** ```tsx // Incorrect: runs on width=767, 766, 765... useEffect(() => { if (width < 768) { enableMobileMode() } }, [width]) // Correct: runs only on boolean transition const isMobile = width < 768 useEffect(() => { if (isMobile) { enableMobileMode() } }, [isMobile]) ``` ### 5.6 Subscribe to Derived State **Impact: MEDIUM (reduces re-render frequency)** Subscribe to derived boolean state instead of continuous values to reduce re-render frequency. **Incorrect: re-renders on every pixel change** ```tsx function Sidebar() { const width = useWindowWidth() // updates continuously const isMobile = width < 768 return